From 7968b45e58e6dfee3e732276a6cb7eab889a4333 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Tue, 9 Sep 2014 22:39:42 -0700 Subject: [PATCH] vendor dependencies with godep dependencies are vendored into Godeps/_workspace and commit versions are recorded in Godeps.json update datastore to e89f0511 update go.crypto --- Godeps/Godeps.json | 71 + Godeps/Readme | 5 + Godeps/_workspace/.gitignore | 2 + .../src/bazil.org/fuse/.gitattributes | 2 + .../_workspace/src/bazil.org/fuse/.gitignore | 8 + Godeps/_workspace/src/bazil.org/fuse/LICENSE | 93 + .../_workspace/src/bazil.org/fuse/README.md | 23 + Godeps/_workspace/src/bazil.org/fuse/debug.go | 14 + .../src/bazil.org/fuse/doc/.gitignore | 4 + .../src/bazil.org/fuse/doc/README.md | 6 + .../fuse/doc/mount-linux-error-init.seq | 32 + .../fuse/doc/mount-linux-error-init.seq.png | Bin 0 -> 29163 bytes .../src/bazil.org/fuse/doc/mount-linux.seq | 41 + .../bazil.org/fuse/doc/mount-linux.seq.png | Bin 0 -> 44615 bytes .../fuse/doc/mount-osx-error-init.seq | 32 + .../fuse/doc/mount-osx-error-init.seq.png | Bin 0 -> 32618 bytes .../src/bazil.org/fuse/doc/mount-osx.seq | 45 + .../src/bazil.org/fuse/doc/mount-osx.seq.png | Bin 0 -> 51408 bytes .../src/bazil.org/fuse/doc/mount-sequence.md | 30 + .../src/bazil.org/fuse/doc/writing-docs.md | 16 + .../src/bazil.org/fuse/error_darwin.go | 36 + .../src/bazil.org/fuse/error_std.go | 7 + .../src/bazil.org/fuse/fs/bench/bench_test.go | 267 ++ .../src/bazil.org/fuse/fs/bench/doc.go | 5 + .../src/bazil.org/fuse/fs/fstestutil/debug.go | 65 + .../bazil.org/fuse/fs/fstestutil/mounted.go | 113 + .../fuse/fs/fstestutil/record/buffer.go | 28 + .../fuse/fs/fstestutil/record/record.go | 381 +++ .../fuse/fs/fstestutil/record/wait.go | 54 + .../_workspace/src/bazil.org/fuse/fs/serve.go | 1316 +++++++++ .../src/bazil.org/fuse/fs/serve_test.go | 1763 +++++++++++++ .../_workspace/src/bazil.org/fuse/fs/tree.go | 96 + Godeps/_workspace/src/bazil.org/fuse/fuse.go | 1906 +++++++++++++ .../src/bazil.org/fuse/fuse_kernel.go | 639 +++++ .../src/bazil.org/fuse/fuse_kernel_darwin.go | 86 + .../src/bazil.org/fuse/fuse_kernel_linux.go | 70 + .../src/bazil.org/fuse/fuse_kernel_std.go | 1 + .../src/bazil.org/fuse/fuse_kernel_test.go | 31 + .../src/bazil.org/fuse/fuseutil/fuseutil.go | 20 + .../src/bazil.org/fuse/hellofs/hello.go | 89 + .../src/bazil.org/fuse/mount_darwin.go | 117 + .../src/bazil.org/fuse/mount_linux.go | 67 + .../src/bazil.org/fuse/syscallx/doc.go | 13 + .../src/bazil.org/fuse/syscallx/generate | 34 + .../src/bazil.org/fuse/syscallx/msync.go | 9 + .../src/bazil.org/fuse/syscallx/msync_386.go | 24 + .../bazil.org/fuse/syscallx/msync_amd64.go | 24 + .../src/bazil.org/fuse/syscallx/syscallx.go | 4 + .../bazil.org/fuse/syscallx/syscallx_std.go | 26 + .../bazil.org/fuse/syscallx/xattr_darwin.go | 38 + .../fuse/syscallx/xattr_darwin_386.go | 97 + .../fuse/syscallx/xattr_darwin_amd64.go | 97 + .../_workspace/src/bazil.org/fuse/unmount.go | 6 + .../src/bazil.org/fuse/unmount_linux.go | 21 + .../src/bazil.org/fuse/unmount_std.go | 17 + .../code.google.com/p/go-uuid/uuid/LICENSE | 27 + .../src/code.google.com/p/go-uuid/uuid/dce.go | 84 + .../src/code.google.com/p/go-uuid/uuid/doc.go | 8 + .../code.google.com/p/go-uuid/uuid/hash.go | 53 + .../code.google.com/p/go-uuid/uuid/node.go | 101 + .../code.google.com/p/go-uuid/uuid/time.go | 132 + .../code.google.com/p/go-uuid/uuid/util.go | 43 + .../code.google.com/p/go-uuid/uuid/uuid.go | 163 ++ .../p/go-uuid/uuid/uuid_test.go | 390 +++ .../p/go-uuid/uuid/version1.go | 41 + 
.../p/go-uuid/uuid/version4.go | 25 + .../p/go.crypto/sha3/keccakf.go | 165 ++ .../code.google.com/p/go.crypto/sha3/sha3.go | 213 ++ .../p/go.crypto/sha3/sha3_test.go | 270 ++ .../p/gogoprotobuf/proto/Makefile | 40 + .../p/gogoprotobuf/proto/all_test.go | 1948 ++++++++++++++ .../p/gogoprotobuf/proto/clone.go | 174 ++ .../p/gogoprotobuf/proto/clone_test.go | 186 ++ .../p/gogoprotobuf/proto/decode.go | 726 +++++ .../p/gogoprotobuf/proto/decode_gogo.go | 220 ++ .../p/gogoprotobuf/proto/encode.go | 961 +++++++ .../p/gogoprotobuf/proto/encode_gogo.go | 361 +++ .../p/gogoprotobuf/proto/equal.go | 241 ++ .../p/gogoprotobuf/proto/equal_test.go | 166 ++ .../p/gogoprotobuf/proto/extensions.go | 460 ++++ .../p/gogoprotobuf/proto/extensions_gogo.go | 189 ++ .../p/gogoprotobuf/proto/extensions_test.go | 60 + .../p/gogoprotobuf/proto/lib.go | 740 ++++++ .../p/gogoprotobuf/proto/lib_gogo.go | 40 + .../p/gogoprotobuf/proto/message_set.go | 216 ++ .../p/gogoprotobuf/proto/pointer_reflect.go | 384 +++ .../p/gogoprotobuf/proto/pointer_unsafe.go | 218 ++ .../gogoprotobuf/proto/pointer_unsafe_gogo.go | 166 ++ .../p/gogoprotobuf/proto/properties.go | 670 +++++ .../p/gogoprotobuf/proto/properties_gogo.go | 107 + .../p/gogoprotobuf/proto/size2_test.go | 63 + .../p/gogoprotobuf/proto/size_test.go | 118 + .../p/gogoprotobuf/proto/skip_gogo.go | 117 + .../p/gogoprotobuf/proto/testdata/Makefile | 47 + .../proto/testdata/golden_test.go | 86 + .../p/gogoprotobuf/proto/testdata/test.pb.go | 2324 ++++++++++++++++ .../proto/testdata/test.pb.go.golden | 1737 ++++++++++++ .../p/gogoprotobuf/proto/testdata/test.proto | 420 +++ .../p/gogoprotobuf/proto/text.go | 736 ++++++ .../p/gogoprotobuf/proto/text_gogo.go | 55 + .../p/gogoprotobuf/proto/text_parser.go | 727 +++++ .../p/gogoprotobuf/proto/text_parser_test.go | 462 ++++ .../p/gogoprotobuf/proto/text_test.go | 408 +++ .../p/goprotobuf/proto/Makefile | 40 + .../p/goprotobuf/proto/all_test.go | 1979 ++++++++++++++ .../p/goprotobuf/proto/clone.go | 169 ++ .../p/goprotobuf/proto/clone_test.go | 186 ++ .../p/goprotobuf/proto/decode.go | 721 +++++ .../p/goprotobuf/proto/encode.go | 1054 ++++++++ .../p/goprotobuf/proto/equal.go | 241 ++ .../p/goprotobuf/proto/equal_test.go | 166 ++ .../p/goprotobuf/proto/extensions.go | 351 +++ .../p/goprotobuf/proto/extensions_test.go | 60 + .../code.google.com/p/goprotobuf/proto/lib.go | 740 ++++++ .../p/goprotobuf/proto/message_set.go | 229 ++ .../p/goprotobuf/proto/message_set_test.go | 66 + .../p/goprotobuf/proto/pointer_reflect.go | 384 +++ .../p/goprotobuf/proto/pointer_unsafe.go | 218 ++ .../p/goprotobuf/proto/properties.go | 658 +++++ .../p/goprotobuf/proto/size2_test.go | 63 + .../p/goprotobuf/proto/size_test.go | 120 + .../p/goprotobuf/proto/testdata/Makefile | 50 + .../goprotobuf/proto/testdata/golden_test.go | 86 + .../p/goprotobuf/proto/testdata/test.pb.go | 2350 +++++++++++++++++ .../p/goprotobuf/proto/testdata/test.proto | 428 +++ .../p/goprotobuf/proto/text.go | 701 +++++ .../p/goprotobuf/proto/text_parser.go | 684 +++++ .../p/goprotobuf/proto/text_parser_test.go | 462 ++++ .../p/goprotobuf/proto/text_test.go | 408 +++ .../p/snappy-go/snappy/decode.go | 124 + .../p/snappy-go/snappy/encode.go | 174 ++ .../p/snappy-go/snappy/snappy.go | 38 + .../p/snappy-go/snappy/snappy_test.go | 261 ++ .../src/github.com/gonuts/flag/LICENSE | 27 + .../src/github.com/gonuts/flag/README.md | 6 + .../github.com/gonuts/flag/example_test.go | 83 + .../src/github.com/gonuts/flag/export_test.go | 22 + .../src/github.com/gonuts/flag/flag.go | 816 ++++++ 
.../src/github.com/gonuts/flag/flag_test.go | 288 ++ .../src/github.com/jbenet/commander/AUTHORS | 11 + .../github.com/jbenet/commander/CONTRIBUTORS | 31 + .../src/github.com/jbenet/commander/LICENSE | 27 + .../src/github.com/jbenet/commander/README.md | 107 + .../github.com/jbenet/commander/commands.go | 358 +++ .../commander/examples/my-cmd/cmd_cmd1.go | 34 + .../commander/examples/my-cmd/cmd_cmd2.go | 34 + .../commander/examples/my-cmd/cmd_subcmd1.go | 18 + .../examples/my-cmd/cmd_subcmd1_cmd1.go | 34 + .../examples/my-cmd/cmd_subcmd1_cmd2.go | 34 + .../commander/examples/my-cmd/cmd_subcmd2.go | 22 + .../examples/my-cmd/cmd_subcmd2_cmd1.go | 34 + .../examples/my-cmd/cmd_subcmd2_cmd2.go | 34 + .../jbenet/commander/examples/my-cmd/main.go | 33 + .../github.com/jbenet/datastore.go/LICENSE | 21 + .../github.com/jbenet/datastore.go/Makefile | 12 + .../github.com/jbenet/datastore.go/README.md | 15 + .../jbenet/datastore.go/basic_ds.go | 118 + .../jbenet/datastore.go/datastore.go | 87 + .../jbenet/datastore.go/elastigo/datastore.go | 126 + .../src/github.com/jbenet/datastore.go/key.go | 189 ++ .../jbenet/datastore.go/key_test.go | 127 + .../jbenet/datastore.go/leveldb/datastore.go | 78 + .../src/github.com/jbenet/go-base58/LICENSE | 13 + .../src/github.com/jbenet/go-base58/README.md | 66 + .../src/github.com/jbenet/go-base58/base58.go | 90 + .../jbenet/go-base58/base58_test.go | 96 + .../src/github.com/jbenet/go-base58/doc.go | 20 + .../src/github.com/jbenet/go-msgio/README.md | 78 + .../src/github.com/jbenet/go-msgio/chan.go | 85 + .../github.com/jbenet/go-msgio/chan_test.go | 110 + .../src/github.com/jbenet/go-msgio/msgio.go | 111 + .../github.com/jbenet/go-msgio/msgio_test.go | 54 + .../github.com/jbenet/go-multiaddr/README.md | 66 + .../github.com/jbenet/go-multiaddr/codec.go | 96 + .../github.com/jbenet/go-multiaddr/index.go | 102 + .../jbenet/go-multiaddr/multiaddr_test.go | 129 + .../jbenet/go-multiaddr/protocols.csv | 9 + .../jbenet/go-multiaddr/protocols.go | 51 + .../github.com/jbenet/go-multihash/README.md | 43 + .../jbenet/go-multihash/multihash.go | 161 ++ .../jbenet/go-multihash/multihash_test.go | 192 ++ .../src/github.com/jbenet/go-multihash/sum.go | 68 + .../jbenet/go-multihash/sum_test.go | 59 + .../jbenet/go-multihash/test/foo.go | 21 + .../syndtr/goleveldb/leveldb/batch.go | 216 ++ .../syndtr/goleveldb/leveldb/batch_test.go | 120 + .../syndtr/goleveldb/leveldb/bench_test.go | 464 ++++ .../syndtr/goleveldb/leveldb/cache/cache.go | 158 ++ .../goleveldb/leveldb/cache/cache_test.go | 547 ++++ .../goleveldb/leveldb/cache/lru_cache.go | 382 +++ .../syndtr/goleveldb/leveldb/comparer.go | 75 + .../leveldb/comparer/bytes_comparer.go | 51 + .../goleveldb/leveldb/comparer/comparer.go | 57 + .../syndtr/goleveldb/leveldb/config.go | 40 + .../syndtr/goleveldb/leveldb/corrupt_test.go | 472 ++++ .../github.com/syndtr/goleveldb/leveldb/db.go | 834 ++++++ .../syndtr/goleveldb/leveldb/db_compaction.go | 689 +++++ .../syndtr/goleveldb/leveldb/db_iter.go | 331 +++ .../syndtr/goleveldb/leveldb/db_snapshot.go | 169 ++ .../syndtr/goleveldb/leveldb/db_state.go | 202 ++ .../syndtr/goleveldb/leveldb/db_test.go | 1890 +++++++++++++ .../syndtr/goleveldb/leveldb/db_util.go | 97 + .../syndtr/goleveldb/leveldb/db_write.go | 290 ++ .../syndtr/goleveldb/leveldb/doc.go | 90 + .../syndtr/goleveldb/leveldb/error.go | 38 + .../syndtr/goleveldb/leveldb/external_test.go | 58 + .../syndtr/goleveldb/leveldb/filter.go | 31 + .../syndtr/goleveldb/leveldb/filter/bloom.go | 116 + 
.../goleveldb/leveldb/filter/bloom_test.go | 142 + .../syndtr/goleveldb/leveldb/filter/filter.go | 60 + .../goleveldb/leveldb/go13_bench_test.go | 58 + .../goleveldb/leveldb/iterator/array_iter.go | 158 ++ .../leveldb/iterator/array_iter_test.go | 30 + .../leveldb/iterator/indexed_iter.go | 221 ++ .../leveldb/iterator/indexed_iter_test.go | 83 + .../syndtr/goleveldb/leveldb/iterator/iter.go | 142 + .../leveldb/iterator/iter_suite_test.go | 17 + .../goleveldb/leveldb/iterator/merged_iter.go | 307 +++ .../leveldb/iterator/merged_iter_test.go | 60 + .../goleveldb/leveldb/journal/journal.go | 520 ++++ .../goleveldb/leveldb/journal/journal_test.go | 818 ++++++ .../syndtr/goleveldb/leveldb/key.go | 139 + .../syndtr/goleveldb/leveldb/key_test.go | 123 + .../goleveldb/leveldb/leveldb_suite_test.go | 20 + .../goleveldb/leveldb/memdb/bench_test.go | 75 + .../syndtr/goleveldb/leveldb/memdb/memdb.go | 452 ++++ .../leveldb/memdb/memdb_suite_test.go | 17 + .../goleveldb/leveldb/memdb/memdb_test.go | 135 + .../syndtr/goleveldb/leveldb/opt/options.go | 326 +++ .../syndtr/goleveldb/leveldb/options.go | 41 + .../syndtr/goleveldb/leveldb/session.go | 396 +++ .../goleveldb/leveldb/session_record.go | 308 +++ .../goleveldb/leveldb/session_record_test.go | 62 + .../syndtr/goleveldb/leveldb/session_util.go | 248 ++ .../goleveldb/leveldb/storage/file_storage.go | 534 ++++ .../leveldb/storage/file_storage_plan9.go | 52 + .../leveldb/storage/file_storage_solaris.go | 68 + .../leveldb/storage/file_storage_test.go | 142 + .../leveldb/storage/file_storage_unix.go | 63 + .../leveldb/storage/file_storage_windows.go | 69 + .../goleveldb/leveldb/storage/mem_storage.go | 203 ++ .../leveldb/storage/mem_storage_test.go | 66 + .../goleveldb/leveldb/storage/storage.go | 127 + .../syndtr/goleveldb/leveldb/storage_test.go | 459 ++++ .../syndtr/goleveldb/leveldb/table.go | 469 ++++ .../goleveldb/leveldb/table/block_test.go | 131 + .../syndtr/goleveldb/leveldb/table/reader.go | 934 +++++++ .../syndtr/goleveldb/leveldb/table/table.go | 177 ++ .../leveldb/table/table_suite_test.go | 17 + .../goleveldb/leveldb/table/table_test.go | 121 + .../syndtr/goleveldb/leveldb/table/writer.go | 379 +++ .../syndtr/goleveldb/leveldb/testutil/db.go | 216 ++ .../syndtr/goleveldb/leveldb/testutil/iter.go | 327 +++ .../syndtr/goleveldb/leveldb/testutil/kv.go | 352 +++ .../goleveldb/leveldb/testutil/kvtest.go | 136 + .../goleveldb/leveldb/testutil/storage.go | 585 ++++ .../syndtr/goleveldb/leveldb/testutil/util.go | 157 ++ .../syndtr/goleveldb/leveldb/testutil_test.go | 59 + .../syndtr/goleveldb/leveldb/util.go | 91 + .../syndtr/goleveldb/leveldb/util/buffer.go | 293 ++ .../goleveldb/leveldb/util/buffer_pool.go | 205 ++ .../goleveldb/leveldb/util/buffer_test.go | 369 +++ .../syndtr/goleveldb/leveldb/util/crc32.go | 30 + .../syndtr/goleveldb/leveldb/util/hash.go | 48 + .../syndtr/goleveldb/leveldb/util/pool.go | 21 + .../goleveldb/leveldb/util/pool_legacy.go | 33 + .../syndtr/goleveldb/leveldb/util/range.go | 32 + .../syndtr/goleveldb/leveldb/util/util.go | 49 + .../syndtr/goleveldb/leveldb/version.go | 436 +++ Makefile | 11 + bitswap/bitswap.go | 4 +- bitswap/message.pb.go | 2 +- blocks/blocks.go | 2 +- blockservice/blocks_test.go | 2 +- blockservice/blockservice.go | 4 +- cmd/ipfs/add.go | 4 +- cmd/ipfs/cat.go | 4 +- cmd/ipfs/commands.go | 2 +- cmd/ipfs/config.go | 4 +- cmd/ipfs/init.go | 4 +- cmd/ipfs/ipfs.go | 4 +- cmd/ipfs/ls.go | 4 +- cmd/ipfs/mount_unix.go | 4 +- cmd/ipfs/mount_windows.go | 4 +- cmd/ipfs/refs.go | 6 +- cmd/ipfs/version.go | 2 +- 
core/core.go | 6 +- core/datastore.go | 4 +- crypto/encode.pb.go | 2 +- crypto/key.go | 2 +- crypto/rsa.go | 2 +- fuse/readonly/readonly_unix.go | 4 +- identify/identify.go | 2 +- identify/message.pb.go | 2 +- merkledag/coding.go | 2 +- merkledag/dagreader.go | 2 +- merkledag/data.pb.go | 2 +- merkledag/merkledag.go | 4 +- merkledag/node.pb.go | 6 +- merkledag/nodepb_test.go | 8 +- path/path.go | 2 +- peer/peer.go | 6 +- peer/peer_test.go | 4 +- routing/dht/Message.go | 2 +- routing/dht/dht.go | 6 +- routing/dht/dht_test.go | 4 +- routing/dht/ext_test.go | 6 +- routing/dht/messages.pb.go | 2 +- routing/dht/routing.go | 4 +- swarm/conn.go | 4 +- swarm/conn_test.go | 4 +- swarm/interface.go | 2 +- swarm/mes_wrapper.pb.go | 2 +- swarm/swarm.go | 4 +- swarm/swarm_test.go | 2 +- swarm/wrapper.go | 2 +- util/util.go | 4 +- util/util_test.go | 2 +- 318 files changed, 62955 insertions(+), 83 deletions(-) create mode 100644 Godeps/Godeps.json create mode 100644 Godeps/Readme create mode 100644 Godeps/_workspace/.gitignore create mode 100644 Godeps/_workspace/src/bazil.org/fuse/.gitattributes create mode 100644 Godeps/_workspace/src/bazil.org/fuse/.gitignore create mode 100644 Godeps/_workspace/src/bazil.org/fuse/LICENSE create mode 100644 Godeps/_workspace/src/bazil.org/fuse/README.md create mode 100644 Godeps/_workspace/src/bazil.org/fuse/debug.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/doc/.gitignore create mode 100644 Godeps/_workspace/src/bazil.org/fuse/doc/README.md create mode 100644 Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux-error-init.seq create mode 100644 Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux-error-init.seq.png create mode 100644 Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux.seq create mode 100644 Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux.seq.png create mode 100644 Godeps/_workspace/src/bazil.org/fuse/doc/mount-osx-error-init.seq create mode 100644 Godeps/_workspace/src/bazil.org/fuse/doc/mount-osx-error-init.seq.png create mode 100644 Godeps/_workspace/src/bazil.org/fuse/doc/mount-osx.seq create mode 100644 Godeps/_workspace/src/bazil.org/fuse/doc/mount-osx.seq.png create mode 100644 Godeps/_workspace/src/bazil.org/fuse/doc/mount-sequence.md create mode 100644 Godeps/_workspace/src/bazil.org/fuse/doc/writing-docs.md create mode 100644 Godeps/_workspace/src/bazil.org/fuse/error_darwin.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/error_std.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fs/bench/bench_test.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fs/bench/doc.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil/debug.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil/mounted.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil/record/buffer.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil/record/record.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil/record/wait.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fs/serve.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fs/serve_test.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fs/tree.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fuse.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fuse_kernel.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_darwin.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_linux.go create mode 100644 
Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_std.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_test.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/fuseutil/fuseutil.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/hellofs/hello.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/mount_darwin.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/mount_linux.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/syscallx/doc.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/syscallx/generate create mode 100644 Godeps/_workspace/src/bazil.org/fuse/syscallx/msync.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/syscallx/msync_386.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/syscallx/msync_amd64.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/syscallx/syscallx.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/syscallx/syscallx_std.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/syscallx/xattr_darwin.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/syscallx/xattr_darwin_386.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/syscallx/xattr_darwin_amd64.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/unmount.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/unmount_linux.go create mode 100644 Godeps/_workspace/src/bazil.org/fuse/unmount_std.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go.crypto/sha3/keccakf.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go.crypto/sha3/sha3.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go.crypto/sha3/sha3_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/Makefile create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/all_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/clone.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/clone_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/decode.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/decode_gogo.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/encode.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/encode_gogo.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/equal.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/equal_test.go create mode 100644 
Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/extensions.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/extensions_gogo.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/extensions_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/lib.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/lib_gogo.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/message_set.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/pointer_reflect.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/pointer_unsafe.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/pointer_unsafe_gogo.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/properties.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/properties_gogo.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/size2_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/size_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/skip_gogo.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/Makefile create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/golden_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/test.pb.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/test.pb.go.golden create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/test.proto create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_gogo.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_parser.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_parser_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/Makefile create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/all_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/decode.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/encode.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/lib.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_reflect.go create mode 100644 
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_unsafe.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/properties.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size2_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/Makefile create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/golden_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.pb.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.proto create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_test.go create mode 100644 Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go create mode 100644 Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go create mode 100644 Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go create mode 100644 Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go create mode 100644 Godeps/_workspace/src/github.com/gonuts/flag/LICENSE create mode 100644 Godeps/_workspace/src/github.com/gonuts/flag/README.md create mode 100644 Godeps/_workspace/src/github.com/gonuts/flag/example_test.go create mode 100644 Godeps/_workspace/src/github.com/gonuts/flag/export_test.go create mode 100644 Godeps/_workspace/src/github.com/gonuts/flag/flag.go create mode 100644 Godeps/_workspace/src/github.com/gonuts/flag/flag_test.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/AUTHORS create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/CONTRIBUTORS create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/LICENSE create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/README.md create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/commands.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_cmd1.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_cmd2.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd1.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd1_cmd1.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd1_cmd2.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd2.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd2_cmd1.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd2_cmd2.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/main.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/datastore.go/LICENSE create mode 100644 Godeps/_workspace/src/github.com/jbenet/datastore.go/Makefile create mode 100644 Godeps/_workspace/src/github.com/jbenet/datastore.go/README.md create mode 100644 Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds.go create mode 100644 
Godeps/_workspace/src/github.com/jbenet/datastore.go/datastore.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/datastore.go/elastigo/datastore.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/datastore.go/key.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/datastore.go/key_test.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb/datastore.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-base58/LICENSE create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-base58/README.md create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-base58/base58.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-base58/base58_test.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-base58/doc.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-msgio/README.md create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-msgio/chan.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-msgio/chan_test.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-msgio/msgio.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-msgio/msgio_test.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-multiaddr/README.md create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-multiaddr/codec.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-multiaddr/index.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-multiaddr/protocols.csv create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-multiaddr/protocols.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-multihash/README.md create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-multihash/multihash.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-multihash/multihash_test.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-multihash/sum.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-multihash/sum_test.go create mode 100644 Godeps/_workspace/src/github.com/jbenet/go-multihash/test/foo.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go create mode 100644 
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/error.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go create mode 100644 
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go create mode 100644 Makefile diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json new file mode 100644 index 000000000..0fb6cb109 --- /dev/null +++ b/Godeps/Godeps.json @@ -0,0 +1,71 @@ +{ + "ImportPath": "github.com/jbenet/go-ipfs", + "GoVersion": "go1.3.1", + "Packages": [ + "./..." 
+ ], + "Deps": [ + { + "ImportPath": "bazil.org/fuse", + "Rev": "a04507d54fc3610d38ee951402d8c4acab56c7b1" + }, + { + "ImportPath": "code.google.com/p/go-uuid/uuid", + "Comment": "null-12", + "Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9" + }, + { + "ImportPath": "code.google.com/p/go.crypto/sha3", + "Comment": "null-219", + "Rev": "00a7d3b31bbab5795b4a51933c04fc2768242970" + }, + { + "ImportPath": "code.google.com/p/gogoprotobuf/proto", + "Rev": "6c980277330804e94257ac7ef70a3adbe1641059" + }, + { + "ImportPath": "code.google.com/p/goprotobuf/proto", + "Comment": "go.r60-152", + "Rev": "36be16571e14f67e114bb0af619e5de2c1591679" + }, + { + "ImportPath": "code.google.com/p/snappy-go/snappy", + "Comment": "null-15", + "Rev": "12e4b4183793ac4b061921e7980845e750679fd0" + }, + { + "ImportPath": "github.com/gonuts/flag", + "Rev": "741a6cbd37a30dedc93f817e7de6aaf0ca38a493" + }, + { + "ImportPath": "github.com/jbenet/commander", + "Rev": "e0cf317891f0ab6f1ac64dfcb754b4fb5e69f7df" + }, + { + "ImportPath": "github.com/jbenet/datastore.go", + "Rev": "e89f0511689bb2d0608496e15491f241842de085" + }, + { + "ImportPath": "github.com/jbenet/go-base58", + "Rev": "568a28d73fd97651d3442392036a658b6976eed5" + }, + { + "ImportPath": "github.com/jbenet/go-msgio", + "Rev": "c9069ab79c95aa0686347b516972c7329c4391f2" + }, + { + "ImportPath": "github.com/jbenet/go-multiaddr", + "Comment": "0.1.0-1-g99196c0", + "Rev": "99196c0d231f83eea7f6e47cf59cbb5a0b86b358" + }, + { + "ImportPath": "github.com/jbenet/go-multihash", + "Comment": "0.1.0-5-g1976046", + "Rev": "1976046c2b0db0b668791b3e541d76a38b7c1af7" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb", + "Rev": "99056d50e56252fbe0021d5c893defca5a76baf8" + } + ] +} diff --git a/Godeps/Readme b/Godeps/Readme new file mode 100644 index 000000000..4cdaa53d5 --- /dev/null +++ b/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore new file mode 100644 index 000000000..f037d684e --- /dev/null +++ b/Godeps/_workspace/.gitignore @@ -0,0 +1,2 @@ +/pkg +/bin diff --git a/Godeps/_workspace/src/bazil.org/fuse/.gitattributes b/Godeps/_workspace/src/bazil.org/fuse/.gitattributes new file mode 100644 index 000000000..b65f2a9ff --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/.gitattributes @@ -0,0 +1,2 @@ +*.go filter=gofmt +*.cgo filter=gofmt diff --git a/Godeps/_workspace/src/bazil.org/fuse/.gitignore b/Godeps/_workspace/src/bazil.org/fuse/.gitignore new file mode 100644 index 000000000..2b286ca94 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/.gitignore @@ -0,0 +1,8 @@ +*~ +.#* +## the next line needs to start with a backslash to avoid looking like +## a comment +\#*# +.*.swp + +*.test diff --git a/Godeps/_workspace/src/bazil.org/fuse/LICENSE b/Godeps/_workspace/src/bazil.org/fuse/LICENSE new file mode 100644 index 000000000..d369cb822 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/LICENSE @@ -0,0 +1,93 @@ +Copyright (c) 2013, 2014 Tommi Virtanen. +Copyright (c) 2009, 2011, 2012 The Go Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +The following included software components have additional copyright +notices and license terms that may differ from the above. + + +File fuse.go: + +// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c, +// which carries this notice: +// +// The files in this directory are subject to the following license. +// +// The author of this software is Russ Cox. +// +// Copyright (c) 2006 Russ Cox +// +// Permission to use, copy, modify, and distribute this software for any +// purpose without fee is hereby granted, provided that this entire notice +// is included in all copies of any software which is or includes a copy +// or modification of this software and in all copies of the supporting +// documentation for such software. +// +// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED +// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY +// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS +// FITNESS FOR ANY PARTICULAR PURPOSE. + + +File fuse_kernel.go: + +// Derived from FUSE's fuse_kernel.h +/* + This file defines the kernel interface of FUSE + Copyright (C) 2001-2007 Miklos Szeredi + + + This -- and only this -- header file may also be distributed under + the terms of the BSD Licence as follows: + + Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. +*/ diff --git a/Godeps/_workspace/src/bazil.org/fuse/README.md b/Godeps/_workspace/src/bazil.org/fuse/README.md new file mode 100644 index 000000000..471b2b258 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/README.md @@ -0,0 +1,23 @@ +bazil.org/fuse -- Filesystems in Go +=================================== + +`bazil.org/fuse` is a Go library for writing FUSE userspace +filesystems. + +It is a from-scratch implementation of the kernel-userspace +communication protocol, and does not use the C library from the +project called FUSE. `bazil.org/fuse` embraces Go fully for safety and +ease of programming. + +Here’s how to get going: + + go get bazil.org/fuse + +Website: http://bazil.org/fuse/ + +Github repository: https://github.com/bazillion/fuse + +API docs: http://godoc.org/bazil.org/fuse + +Our thanks to Russ Cox for his fuse library, which this project is +based on. diff --git a/Godeps/_workspace/src/bazil.org/fuse/debug.go b/Godeps/_workspace/src/bazil.org/fuse/debug.go new file mode 100644 index 000000000..78194ff20 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/debug.go @@ -0,0 +1,14 @@ +package fuse + +import ( + "runtime" +) + +func stack() string { + buf := make([]byte, 1024) + return string(buf[:runtime.Stack(buf, false)]) +} + +func nop(msg interface{}) {} + +var Debug func(msg interface{}) = nop diff --git a/Godeps/_workspace/src/bazil.org/fuse/doc/.gitignore b/Godeps/_workspace/src/bazil.org/fuse/doc/.gitignore new file mode 100644 index 000000000..6ebe2d170 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/doc/.gitignore @@ -0,0 +1,4 @@ +/*.seq.svg + +# not ignoring *.seq.png; we want those committed to the repo +# for embedding on Github diff --git a/Godeps/_workspace/src/bazil.org/fuse/doc/README.md b/Godeps/_workspace/src/bazil.org/fuse/doc/README.md new file mode 100644 index 000000000..54ed0e590 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/doc/README.md @@ -0,0 +1,6 @@ +# bazil.org/fuse documentation + +See also API docs at http://godoc.org/bazil.org/fuse + +- [The mount sequence](mount-sequence.md) +- [Writing documentation](writing-docs.md) diff --git a/Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux-error-init.seq b/Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux-error-init.seq new file mode 100644 index 000000000..89cf15158 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux-error-init.seq @@ -0,0 +1,32 @@ +seqdiag { + app; + fuse [label="bazil.org/fuse"]; + fusermount; + kernel; + mounts; + + app; + fuse [label="bazil.org/fuse"]; + fusermount; + kernel; + mounts; + + app -> fuse [label="Mount"]; + fuse -> fusermount [label="spawn, pass socketpair fd"]; + fusermount -> kernel [label="open /dev/fuse"]; + fusermount -> kernel [label="mount(2)"]; + kernel ->> mounts [label="mount is visible"]; + fusermount <-- kernel [label="mount(2) returns"]; + fuse <<-- fusermount [diagonal, label="exit, receive /dev/fuse fd", leftnote="on Linux, successful exit here\nmeans the mount has happened,\nthough 
InitRequest might not have yet"]; + app <-- fuse [label="Mount returns\nConn.Ready is already closed"]; + + app -> fuse [label="fs.Serve"]; + fuse => kernel [label="read /dev/fuse fd", note="starts with InitRequest"]; + fuse -> app [label="Init"]; + fuse <-- app [color=red]; + fuse -> kernel [label="write /dev/fuse fd", color=red]; + kernel -> kernel [label="set connection\nstate to error", color=red]; + fuse <-- kernel; + ... conn.MountError == nil, so it is still mounted ... + ... call conn.Close to clean up ... +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux-error-init.seq.png b/Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux-error-init.seq.png new file mode 100644 index 0000000000000000000000000000000000000000..fea214f71f210fe41eb204cf925a9b8b81305c5d GIT binary patch literal 29163
[base85-encoded binary payload for mount-linux-error-init.seq.png omitted; the patch text is truncated partway through this binary hunk]
zZU0vgAUv8aKb{TGifp~m1-m`F1M(}ez`jj`Gf*Mr8t;;&ei;1u2mJh=-^0()xBp)L zfA|x=x3cl0L)TDQ6^pMa#^L55Txg9^x;~{${^x3dHLJjB{HYnKE1ao|Qocv^y4y z{*L8;hv#s#m;BsFVAJh?ft?xPgiE0${=T$0Gd=A8Y41wonz*+31XQr7(29r(B>H@{xKydiPKZ)mXaOH+t;l9k zQ85Hf*_148E%K}-wj!b|Q7NSq!37~gG^9meiXc&00t86hh#JO#AqfzY^h`vnPwevH zz2E!rJ|%PS%)NK+%suxk|MQ>u2d=K1VfRE+6c`y70g&q~!PvZs)%W1#sS(2hOxjDO z_UR_dgS$9WZfNHp*JN4rkYfsc4Fro87&~*pM}fG3u16QtrOF3lszjS(;B6Pv)aR}b z(c8rpXtkWDvWnCZWkWS(nC8n|5N_yp@gD3c6Y9Z9_HOZy$02bw4bA1gq2)P~#qrub zCwX!?uox+bYVw@8CG;4JB52S zQfwo9s^&?VkZ!m+(Qyj?Yd$A}Qz$y!C5qq3MqS*R=n3Uiol|KnM^DxE2pm^ImxIvm z&8AHc&}J56Sgw})hOvTytzaL^qAlYbpJ`VkL z3jG#6?4xE+o2#G5E&CVnMfUTu#0ed$I}8!3eQcH#L*W1#63>Lsj|933{x`x@Uth}*NHjC+buzFb>ph#d#I7NMvkFhYtpLf_@yIi_# zHU&ywL@7)=@V(T8lz<73m&#vStD{vt)k^p?ue9z1426s736?RRoq0I_fmy&IthqgN zi+9j_oDrk+LlTk(+oG$z6J{U|m@@$jGy=Yq9ZiR12J*k&_zj-l!y2iUu zE8--_qEyV*Vc`4Zl!~s17t_uHzz_U10JnL)`B~IQR%-d(<3xj#LQ)Roy)tOwo<8gr zh0%9Y0~h}cR+f?xEQ()e?F*UjnAePKMuK{!YOA(-n>04(Jk97(HaC6B$h5E#2GVNVA;4P54~Ks(@i3BnVTqdo+; zAqJby1#e<@K%oNb6OjCi!3F^#+UnzhHa)t5)V!A)a0W}sf`kIhS^^L`FBf|H4f*9m zK#+@OM*~f5Gojt3a5})g?_(AdgbEl84BHO=!iaH*YWFg})jnW3F&YQ^KJGmSP%dbP zt5hl}LP+6oD{X#tr{pO5Z1?hSr)8u{adiV5=}DUs&)l zG97pe4`W*kVK2m|91DKLsLJ|va~XAuNI=K-In-bo`UmbeJDpmm{|taPu&6Er(L2V3 zWXX@h0#t`CkC<1HXI~?>NU52{xu(dpzHTt~UQ{|pR`3WnYi~UUyq02q=-=73MyvWy zAvKJEu0L%24^b+w?hn`~zb@2r?hC-oGVsy!4oI zKllV+-w0GmArB_!pluXj>!)6NH}?Q)F##OPFZ|Py@)R6X^BE%-yq`` z7dIUqpl9p%vlQnn(p0C4kNDg^ueWuXa!Qc!gXqr3XSPdZgOx;s*3U6_I0}he3?d03 zPrtgcnpPgCP^#WW6UaBK!%6T#3y~%}om=J4R>zZpj5eDmsbKpH1v6KqalCkiw%Z>o zZ4>M{LgG+=37M5Bk$tk!dm}{{MHFTaMTvC#^|XiWhKdB-BKV90Vw>fUS94<}8o5t- zbg)x7da>C1aL}Xq0{~GGL$TJsy`qG(s}a>CJ2+66r0Co_R4F^p7v*xpAw9pXslnfw z3<=t-K~KtFPnJ%5OQSGFYGy;jX9e%taP4BOcZs9Wx$25e9%{?2pnQ?0B7M?`3a<~P z^-{@l9-5NQijn9OR|#+TnfCVQm>`QH1HFC}1^ChrDATwrRW6Dn zo5e2W)JC=7Ji05Plc#W;AZ`eh6dHE!i1rB1SMY|2T{KNu6>a@qH7(DjlSg4B*GrC; ziq2iW9AglktTv7N7=$RUJ=%JYF7$R-gkp%^SlMhz7M9XfouxwaK3aO>Hd@42+Q&+w zxQE)qliPWI>J8b~c>;Q;h!L3gM6<=bY%OvT5BoKrKdp8wf?1+`DdbHujXWO6p|PXw z!lC1=q@S9TamYm%JVxmGT3z#xt45fU|DR9sSEa@8%K!q%h?msUSLWHstaf_QT;2OY zelwGJ>~EJ%MbaCxF1GJ0PMk(yQ9(eyBvS20sVsf>qU4M=#D1`v;vv$MDU=m^ zh9Mnd4NyD}Ym$N0+6Bn}f?;Emeph01B5=bAmE#q5w(3o#O7<}8OOz;-IkKL~2Larg1@DZPiBy{-^hF|_Q$lun#919l@ANQ zsVlxC3~gH}B08gtMsvCZ6)D7XCbT9T5~UoKKyV1zGvCiQ8ZSj)EZ^ZO482(ee$|mp$6I-%T@F`mV973?Q1YH-gVqzB}IM7c=4siE#j+TQmf63?;LQF87 z12Ogqz5eb2;Y7=E8MJcH44grTISIRvi5Qg>?10JK-B8yajUeHfSj~%lzIrfRoTF@C z$!yMIFtd6XvlbgFKM(rdKu_CLbGQ`5@9`In+A0g>JG6^#*Cd^Rss~m0R-b;43{toeEKirXb_~ z=J87wj=vL{#YoDahIg1TR~g3uymLPtygRVfPT&F_E^pdE?`;{Ng(N@|!{Am{CcF4I z!Wt6NB&_n5B6$_faSJ2VB!+EI&logItJTkWPbqZ75f0USus5Wk@<@=nyv3~`qe6`8 zmiRLue5auqlDVB-6zA-7E$;ZK{Jtnug454aT3Q{}1#a$U7mi^jg*a}J#CLXRbdY?& z?R*>+Q&O)?iHM=Kq6v787=;*SlqjC+xP@&^YWU)LuTxD(^9xmqO}44rjG!t&N#(b= zqZ%zwUiA*)?UD&KVI``c>X@#RyYYm#wd`N&y$uB9%FQSF1sU(*ZTMtA$1tFw z!KdDi-H;jLcg!XGx(@O&3pd%uT3SRBJFI5^{R5_`_St01rr`)_In*5`xiFPO$Y9t<ugnMvh*;R)$kE63QJtTUBYh0> zMBZ>GH74SWqR>SPb+OOug5E+_2{V`Z)%ph7J)13f(%4HqW=#tb`|ULVZ2wQLo4K&LNJsK=zelk?R{I9LU%EW<^Iriy#wFAM literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux.seq b/Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux.seq new file mode 100644 index 000000000..a1cafc7a6 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux.seq @@ -0,0 +1,41 @@ +seqdiag { + // seqdiag -T svg -o doc/mount-osx.svg doc/mount-osx.seq + app; + fuse [label="bazil.org/fuse"]; + fusermount; + kernel; + mounts; + + app -> fuse [label="Mount"]; + fuse -> fusermount [label="spawn, pass socketpair fd"]; + fusermount -> kernel 
+  fusermount -> kernel [label="mount(2)"];
+  kernel ->> mounts [label="mount is visible"];
+  fusermount <-- kernel [label="mount(2) returns"];
+  fuse <<-- fusermount [diagonal, label="exit, receive /dev/fuse fd", leftnote="on Linux, successful exit here\nmeans the mount has happened,\nthough InitRequest might not have yet"];
+  app <-- fuse [label="Mount returns\nConn.Ready is already closed", rightnote="InitRequest and StatfsRequest\nmay or may not be seen\nbefore Conn.Ready,\ndepending on platform"];
+
+  app -> fuse [label="fs.Serve"];
+  fuse => kernel [label="read /dev/fuse fd", note="starts with InitRequest"];
+  fuse => app [label="FS/Node/Handle methods"];
+  fuse => kernel [label="write /dev/fuse fd"];
+  ... repeat ...
+
+  ... shutting down ...
+  app -> fuse [label="Unmount"];
+  fuse -> fusermount [label="fusermount -u"];
+  fusermount -> kernel;
+  kernel <<-- mounts;
+  fusermount <-- kernel;
+  fuse <<-- fusermount [diagonal];
+  app <-- fuse [label="Unmount returns"];
+
+  // actually triggers before above
+  fuse <<-- kernel [diagonal, label="/dev/fuse EOF"];
+  app <-- fuse [label="fs.Serve returns"];
+
+  app -> fuse [label="conn.Close"];
+  fuse -> kernel [label="close /dev/fuse fd"];
+  fuse <-- kernel;
+  app <-- fuse;
+}

diff --git a/Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux.seq.png b/Godeps/_workspace/src/bazil.org/fuse/doc/mount-linux.seq.png
new file mode 100644
index 0000000000000000000000000000000000000000..af373dd284c52c60e65a9514bfc077785feef99f
GIT binary patch
literal 44615
[binary PNG data omitted]
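The application side of the Linux sequence above reduces to a handful of calls into bazil.org/fuse and bazil.org/fuse/fs. Below is a minimal sketch, not part of the vendored code: the package and function names are illustrative, error handling is trimmed, and the concrete fs.FS implementation (the FS/Node/Handle methods the diagram refers to) is assumed to be defined elsewhere.

package fuseexample // illustrative package name

import (
    "bazil.org/fuse"
    "bazil.org/fuse/fs"
)

// mountAndServe follows the "app" column of the diagram: fuse.Mount opens the
// connection (spawning fusermount on Linux), fs.Serve reads requests from the
// fuse fd and dispatches them to the FS/Node/Handle methods, and Conn.Ready /
// Conn.MountError report whether the mount actually happened.
func mountAndServe(mountpoint string, filesys fs.FS) error {
    c, err := fuse.Mount(mountpoint)
    if err != nil {
        return err
    }
    defer c.Close() // the final "conn.Close" step: closes the fuse fd

    // Serve returns once the filesystem has been unmounted (EOF on the fd).
    if err := fs.Serve(c, filesys); err != nil {
        return err
    }

    // On Linux, Conn.Ready is already closed by the time Mount returns;
    // waiting on it here keeps the same code correct on OS X as well.
    <-c.Ready
    return c.MountError
}

The same pattern works for the OS X sequences below; only the point at which Conn.Ready is closed differs.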
diff --git a/Godeps/_workspace/src/bazil.org/fuse/doc/mount-osx-error-init.seq b/Godeps/_workspace/src/bazil.org/fuse/doc/mount-osx-error-init.seq
new file mode 100644
index 000000000..3bb2b39a0
--- /dev/null
+++ b/Godeps/_workspace/src/bazil.org/fuse/doc/mount-osx-error-init.seq
@@ -0,0 +1,32 @@
+seqdiag {
+  app;
+  fuse [label="bazil.org/fuse"];
+  wait [label="callMount\nhelper goroutine"];
+  mount_osxfusefs;
+  kernel;
+
+  app -> fuse [label="Mount"];
+  fuse -> kernel [label="open /dev/osxfuseN"];
+  fuse -> mount_osxfusefs [label="spawn, pass fd"];
+  fuse -> wait [label="goroutine", note="blocks on cmd.Wait"];
+  app <-- fuse [label="Mount returns"];
+
+  mount_osxfusefs -> kernel [label="mount(2)"];
+
+  app -> fuse [label="fs.Serve"];
+  fuse => kernel [label="read /dev/osxfuseN fd", note="starts with InitRequest,\nalso seen before mount exits:\ntwo StatfsRequest calls"];
+  fuse -> app [label="Init"];
+  fuse <-- app [color=red];
+  fuse -> kernel [label="write /dev/osxfuseN fd", color=red];
+  fuse <-- kernel;
+
+  mount_osxfusefs <-- kernel [label="mount(2) returns", color=red];
+  wait <<-- mount_osxfusefs [diagonal, label="exit", color=red];
+  app <<-- wait [diagonal, label="mount has failed,\nclose Conn.Ready", color=red];
+
+  // actually triggers before above
+  fuse <<-- kernel [diagonal, label="/dev/osxfuseN EOF"];
+  app <-- fuse [label="fs.Serve returns"];
+  ... conn.MountError != nil, so it was never mounted ...
+  ... call conn.Close to clean up ...
+}

diff --git a/Godeps/_workspace/src/bazil.org/fuse/doc/mount-osx-error-init.seq.png b/Godeps/_workspace/src/bazil.org/fuse/doc/mount-osx-error-init.seq.png
new file mode 100644
index 0000000000000000000000000000000000000000..e96589c13452cba6d6e0d821ab0b487dbb4656a6
GIT binary patch
literal 32618
[binary PNG data omitted]
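Because the outcome of the mount is only known when mount_osxfusefs exits, possibly well after Mount has already returned, an application that wants to report success cannot rely on Mount alone; it has to wait for Conn.Ready and then inspect Conn.MountError. A hedged sketch of that check follows; the helper name reportMount is made up for illustration and would typically run alongside fs.Serve.

package fuseexample // illustrative package name

import (
    "log"

    "bazil.org/fuse"
)

// reportMount is a made-up helper: it blocks until the mount attempt has been
// resolved (Conn.Ready closes) and only then decides whether the filesystem is
// really mounted. In the error case above, Conn.MountError is non-nil,
// fs.Serve returns on its own, and the only cleanup left is conn.Close.
func reportMount(c *fuse.Conn, mountpoint string) error {
    <-c.Ready
    if err := c.MountError; err != nil {
        return err // mount never happened
    }
    log.Printf("filesystem mounted at %s", mountpoint)
    return nil
}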
mount_osxfusefs [label="spawn, pass fd"]; + fuse -> wait [label="goroutine", note="blocks on cmd.Wait"]; + app <-- fuse [label="Mount returns"]; + + mount_osxfusefs -> kernel [label="mount(2)"]; + + app -> fuse [label="fs.Serve"]; + fuse => kernel [label="read /dev/osxfuseN fd", note="starts with InitRequest,\nalso seen before mount exits:\ntwo StatfsRequest calls"]; + fuse => app [label="FS/Node/Handle methods"]; + fuse => kernel [label="write /dev/osxfuseN fd"]; + ... repeat ... + + kernel ->> mounts [label="mount is visible"]; + mount_osxfusefs <-- kernel [label="mount(2) returns"]; + wait <<-- mount_osxfusefs [diagonal, label="exit", leftnote="on OS X, successful exit\nhere means we finally know\nthe mount has happened\n(can't trust InitRequest,\nkernel might have timed out\nwaiting for InitResponse)"]; + + app <<-- wait [diagonal, label="mount is ready,\nclose Conn.Ready", rightnote="InitRequest and StatfsRequest\nmay or may not be seen\nbefore Conn.Ready,\ndepending on platform"]; + + ... shutting down ... + app -> fuse [label="Unmount"]; + fuse -> kernel [label="umount(2)"]; + kernel <<-- mounts; + fuse <-- kernel; + app <-- fuse [label="Unmount returns"]; + + // actually triggers before above + fuse <<-- kernel [diagonal, label="/dev/osxfuseN EOF"]; + app <-- fuse [label="fs.Serve returns"]; + + app -> fuse [label="conn.Close"]; + fuse -> kernel [label="close /dev/osxfuseN"]; + fuse <-- kernel; + app <-- fuse; +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/doc/mount-osx.seq.png b/Godeps/_workspace/src/bazil.org/fuse/doc/mount-osx.seq.png new file mode 100644 index 0000000000000000000000000000000000000000..7e310f9144ba42896efc4ad1b124b823bb5cf15b GIT binary patch literal 51408 zcmeFZ2Ut_t+BUo?B37`BAW}rff`wv1krIU&tbl?DmJl>5QUXY%2!upMnNb8hqNp?- z1(Xs%ASfV-1yB*tp#+H#5HU&;Ite5s`JWy1oGE(V_q_l2eb@J2*Y9;aW0JksUVD|N z-1q&gUw7JXo;F!)GJ+t}wr=^!5kZt02%<_by_IjctZebJWp0CM3!aXhq*xy* z3A?ucr?J(m-X|RWXu{D}g03L5m+6$s|Irt(>@NSp4nh04@y-AL5gnedqtT5IZf`ym z-I_0E9Pdh+I8Ql?+<>u3m(E=^To-qbe9q_jFlGc<5vd+fTh*huc_x0U8AsFlxkPAl zTN<@pk|oo3l68}xH_%0TtA=x>Q9hrBG0h*;Lb1$>pC{{T7^xU*cBjHMQ?H9WovBOj<)v&U&KYWc=&F0&0EF#q%w_#QFS1fv8u}c_v0Viw9`>*G*NW#`& zT(s?N*5?zUfk8n*Zv(MKh3fA4n6XWgS$}>eV;IBB+PIA|Nk4L(Ut4%6iA1tWty^MO zA1%_L{LZ~SB(92Rka=D(DHr-N1+i~4oM3pqPB_}(jY@fGO9X4$ZVld|5nLdXVb3b+W^!hb54?J(| zV$lMbfzBc3nDS40QpaA6C?{E6UHYZ{YC3Nk*}YOhX$xB*>85p=?qP-1{%iSt8Sa}* z8X+}2qvGzb9LcZKbT5g>ZX(7pC)c79i9ucI=V`A?@O%8&WXU`BPq1Lo>4mj1FF-Pd zp;<=dI;!e-B{``Ys=dm)E8}>ZZ4BkLb05aO61o22K@X)9+|^%Q@Mvz34Hj-_G>)vA zd$n9E(=yHJn##RD4h>EZtO{el8AoV+J8;U|TyZFiMqmXe2M3Dr?qvx>%uc5yd|(Y#JjB5dX~fLEYc=d$%GTj=T`?2ux=0!p zYsqEHQ<145f<5F(mW|f!Y8@JjH=axp6}9$Gw|`6wXHW3gQ_OpkBXvIBYD6WlI`o?! 
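The shutting-down half of the sequence is driven from the application by fuse.Unmount: the kernel drops the mount, delivers EOF on the fuse fd, fs.Serve returns, and conn.Close finishes the teardown. Below is a minimal sketch of wiring that to an interrupt signal; the function and variable names are illustrative only and are not part of the vendored code.

package fuseexample // illustrative package name

import (
    "log"
    "os"
    "os/signal"

    "bazil.org/fuse"
)

// unmountOnInterrupt drives the "shutting down" part of the sequence: on
// SIGINT it calls fuse.Unmount, the kernel removes the mount and closes the
// fuse fd, fs.Serve returns in the serving goroutine, and a deferred
// conn.Close finishes the teardown.
func unmountOnInterrupt(mountpoint string) {
    sig := make(chan os.Signal, 1)
    signal.Notify(sig, os.Interrupt)
    <-sig
    if err := fuse.Unmount(mountpoint); err != nil {
        log.Printf("unmount %s failed: %v", mountpoint, err)
    }
}

As the diagrams show, Unmount goes through fusermount -u on Linux and umount(2) on OS X, but the application-visible effect is the same: fs.Serve returns.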
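For orientation, an editorial sketch (not part of this patch) of the application-side calls that drive the doc/mount-osx.seq sequence above. The shapes of fuse.Mount, fuse.Unmount and Conn.Ready are assumed from the diagram and may differ slightly in this vendored revision; mnt and myFS are placeholders.

package example

import (
    "log"

    "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse"
    fusefs "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs"
)

// mountAndServe walks through the diagram: Mount, Serve in a goroutine,
// wait for Conn.Ready, then Unmount and Close on the way out.
func mountAndServe(mnt string, myFS fusefs.FS) error {
    c, err := fuse.Mount(mnt) // "Mount": open /dev/osxfuseN, spawn mount_osxfusefs
    if err != nil {
        return err
    }
    defer c.Close() // "conn.Close": close /dev/osxfuseN

    done := make(chan error, 1)
    go func() {
        // "fs.Serve": read requests from the kernel, dispatch to FS/Node/Handle methods
        done <- fusefs.Serve(c, myFS)
    }()

    <-c.Ready // closed once the mount has really happened (see the leftnote in the diagram)

    // ... the mountpoint is now usable ...

    if err := fuse.Unmount(mnt); err != nil { // "Unmount": umount(2)
        log.Println("unmount:", err)
    }
    return <-done // Serve returns after /dev/osxfuseN EOF
}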
[diff text from doc/mount-osx.seq.png up to partway into the fs/fstestutil/record sources is garbled and omitted]
+} + +// Flushes notes whether a FUSE Flush call has been seen. +type Flushes struct { + rec MarkRecorder +} + +var _ = fs.HandleFlusher(&Flushes{}) + +func (r *Flushes) Flush(req *fuse.FlushRequest, intr fs.Intr) fuse.Error { + r.rec.Mark() + return nil +} + +func (r *Flushes) RecordedFlush() bool { + return r.rec.Recorded() +} + +type Recorder struct { + mu sync.Mutex + val interface{} +} + +// Record that we've seen value. A nil value is indistinguishable from +// no value recorded.
+func (r *Recorder) Record(value interface{}) { + r.mu.Lock() + r.val = value + r.mu.Unlock() +} + +func (r *Recorder) Recorded() interface{} { + r.mu.Lock() + val := r.val + r.mu.Unlock() + return val +} + +type RequestRecorder struct { + rec Recorder +} + +// Record a fuse.Request, after zeroing header fields that are hard to +// reproduce. +// +// Make sure to record a copy, not the original request. +func (r *RequestRecorder) RecordRequest(req fuse.Request) { + hdr := req.Hdr() + *hdr = fuse.Header{} + r.rec.Record(req) +} + +func (r *RequestRecorder) Recorded() fuse.Request { + val := r.rec.Recorded() + if val == nil { + return nil + } + return val.(fuse.Request) +} + +// Setattrs records a Setattr request and its fields. +type Setattrs struct { + rec RequestRecorder +} + +var _ = fs.NodeSetattrer(&Setattrs{}) + +func (r *Setattrs) Setattr(req *fuse.SetattrRequest, resp *fuse.SetattrResponse, intr fs.Intr) fuse.Error { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil +} + +func (r *Setattrs) RecordedSetattr() fuse.SetattrRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.SetattrRequest{} + } + return *(val.(*fuse.SetattrRequest)) +} + +// Fsyncs records an Fsync request and its fields. +type Fsyncs struct { + rec RequestRecorder +} + +var _ = fs.NodeFsyncer(&Fsyncs{}) + +func (r *Fsyncs) Fsync(req *fuse.FsyncRequest, intr fs.Intr) fuse.Error { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil +} + +func (r *Fsyncs) RecordedFsync() fuse.FsyncRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.FsyncRequest{} + } + return *(val.(*fuse.FsyncRequest)) +} + +// Mkdirs records a Mkdir request and its fields. +type Mkdirs struct { + rec RequestRecorder +} + +var _ = fs.NodeMkdirer(&Mkdirs{}) + +// Mkdir records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Mkdirs) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error) { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil, fuse.EIO +} + +// RecordedMkdir returns information about the Mkdir request. +// If no request was seen, returns a zero value. +func (r *Mkdirs) RecordedMkdir() fuse.MkdirRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.MkdirRequest{} + } + return *(val.(*fuse.MkdirRequest)) +} + +// Symlinks records a Symlink request and its fields. +type Symlinks struct { + rec RequestRecorder +} + +var _ = fs.NodeSymlinker(&Symlinks{}) + +// Symlink records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Symlinks) Symlink(req *fuse.SymlinkRequest, intr fs.Intr) (fs.Node, fuse.Error) { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil, fuse.EIO +} + +// RecordedSymlink returns information about the Symlink request. +// If no request was seen, returns a zero value. +func (r *Symlinks) RecordedSymlink() fuse.SymlinkRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.SymlinkRequest{} + } + return *(val.(*fuse.SymlinkRequest)) +} + +// Links records a Link request and its fields. +type Links struct { + rec RequestRecorder +} + +var _ = fs.NodeLinker(&Links{}) + +// Link records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. 
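As the comment above suggests, test code typically embeds one of these recorder types in a node and wraps the call so it still succeeds. A minimal sketch of such a wrapper (editorial, not part of the vendored record package; linkTestNode and its child field are hypothetical):

package example

import (
    "os"

    "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse"
    "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs"
    "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil/record"
)

// linkTestNode is a directory node for tests: it records Link requests but
// still hands a real child node back to the kernel.
type linkTestNode struct {
    record.Links
    child fs.Node // the node to return from Link
}

func (n *linkTestNode) Attr() fuse.Attr {
    return fuse.Attr{Mode: os.ModeDir | 0755}
}

func (n *linkTestNode) Link(req *fuse.LinkRequest, old fs.Node, intr fs.Intr) (fs.Node, fuse.Error) {
    n.Links.Link(req, old, intr) // record a copy of the request; ignore the EIO it returns
    return n.child, nil
}

// A test can then assert on what the kernel sent, for example:
//   req := n.RecordedLink(); _ = req.NewName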
+func (r *Links) Link(req *fuse.LinkRequest, old fs.Node, intr fs.Intr) (fs.Node, fuse.Error) { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil, fuse.EIO +} + +// RecordedLink returns information about the Link request. +// If no request was seen, returns a zero value. +func (r *Links) RecordedLink() fuse.LinkRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.LinkRequest{} + } + return *(val.(*fuse.LinkRequest)) +} + +// Mknods records a Mknod request and its fields. +type Mknods struct { + rec RequestRecorder +} + +var _ = fs.NodeMknoder(&Mknods{}) + +// Mknod records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Mknods) Mknod(req *fuse.MknodRequest, intr fs.Intr) (fs.Node, fuse.Error) { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil, fuse.EIO +} + +// RecordedMknod returns information about the Mknod request. +// If no request was seen, returns a zero value. +func (r *Mknods) RecordedMknod() fuse.MknodRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.MknodRequest{} + } + return *(val.(*fuse.MknodRequest)) +} + +// Opens records a Open request and its fields. +type Opens struct { + rec RequestRecorder +} + +var _ = fs.NodeOpener(&Opens{}) + +// Open records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Opens) Open(req *fuse.OpenRequest, resp *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil, fuse.EIO +} + +// RecordedOpen returns information about the Open request. +// If no request was seen, returns a zero value. +func (r *Opens) RecordedOpen() fuse.OpenRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.OpenRequest{} + } + return *(val.(*fuse.OpenRequest)) +} + +// Getxattrs records a Getxattr request and its fields. +type Getxattrs struct { + rec RequestRecorder +} + +var _ = fs.NodeGetxattrer(&Getxattrs{}) + +// Getxattr records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Getxattrs) Getxattr(req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse, intr fs.Intr) fuse.Error { + tmp := *req + r.rec.RecordRequest(&tmp) + return fuse.ENODATA +} + +// RecordedGetxattr returns information about the Getxattr request. +// If no request was seen, returns a zero value. +func (r *Getxattrs) RecordedGetxattr() fuse.GetxattrRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.GetxattrRequest{} + } + return *(val.(*fuse.GetxattrRequest)) +} + +// Listxattrs records a Listxattr request and its fields. +type Listxattrs struct { + rec RequestRecorder +} + +var _ = fs.NodeListxattrer(&Listxattrs{}) + +// Listxattr records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Listxattrs) Listxattr(req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse, intr fs.Intr) fuse.Error { + tmp := *req + r.rec.RecordRequest(&tmp) + return fuse.ENODATA +} + +// RecordedListxattr returns information about the Listxattr request. +// If no request was seen, returns a zero value. +func (r *Listxattrs) RecordedListxattr() fuse.ListxattrRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.ListxattrRequest{} + } + return *(val.(*fuse.ListxattrRequest)) +} + +// Setxattrs records a Setxattr request and its fields. 
+type Setxattrs struct { + rec RequestRecorder +} + +var _ = fs.NodeSetxattrer(&Setxattrs{}) + +// Setxattr records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Setxattrs) Setxattr(req *fuse.SetxattrRequest, intr fs.Intr) fuse.Error { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil +} + +// RecordedSetxattr returns information about the Setxattr request. +// If no request was seen, returns a zero value. +func (r *Setxattrs) RecordedSetxattr() fuse.SetxattrRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.SetxattrRequest{} + } + return *(val.(*fuse.SetxattrRequest)) +} + +// Removexattrs records a Removexattr request and its fields. +type Removexattrs struct { + rec RequestRecorder +} + +var _ = fs.NodeRemovexattrer(&Removexattrs{}) + +// Removexattr records the request and returns an error. Most callers should +// wrap this call in a function that returns a more useful result. +func (r *Removexattrs) Removexattr(req *fuse.RemovexattrRequest, intr fs.Intr) fuse.Error { + tmp := *req + r.rec.RecordRequest(&tmp) + return nil +} + +// RecordedRemovexattr returns information about the Removexattr request. +// If no request was seen, returns a zero value. +func (r *Removexattrs) RecordedRemovexattr() fuse.RemovexattrRequest { + val := r.rec.Recorded() + if val == nil { + return fuse.RemovexattrRequest{} + } + return *(val.(*fuse.RemovexattrRequest)) +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil/record/wait.go b/Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil/record/wait.go new file mode 100644 index 000000000..bca723772 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil/record/wait.go @@ -0,0 +1,54 @@ +package record + +import ( + "sync" + "time" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs" +) + +type nothing struct{} + +// ReleaseWaiter notes whether a FUSE Release call has been seen. +// +// Releases are not guaranteed to happen synchronously with any client +// call, so they must be waited for. +type ReleaseWaiter struct { + once sync.Once + seen chan nothing +} + +var _ = fs.HandleReleaser(&ReleaseWaiter{}) + +func (r *ReleaseWaiter) init() { + r.once.Do(func() { + r.seen = make(chan nothing, 1) + }) +} + +func (r *ReleaseWaiter) Release(req *fuse.ReleaseRequest, intr fs.Intr) fuse.Error { + r.init() + close(r.seen) + return nil +} + +// WaitForRelease waits for Release to be called. +// +// With zero duration, wait forever. Otherwise, timeout early +// in a more controller way than `-test.timeout`. +// +// Returns whether a Release was seen. Always true if dur==0. +func (r *ReleaseWaiter) WaitForRelease(dur time.Duration) bool { + r.init() + var timeout <-chan time.Time + if dur > 0 { + timeout = time.After(dur) + } + select { + case <-r.seen: + return true + case <-timeout: + return false + } +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/fs/serve.go b/Godeps/_workspace/src/bazil.org/fuse/fs/serve.go new file mode 100644 index 000000000..efaa4f143 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/fs/serve.go @@ -0,0 +1,1316 @@ +// FUSE service loop, for servers that wish to use it. 
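Before the loop itself, an outside view may help: the sketch below (editorial, not part of this patch) is roughly the smallest filesystem the service loop can drive. helloFS, helloDir and helloFile are hypothetical names; fuse.Dirent and fuse.DT_File are assumed to have their usual shape in this revision. Since the nodes have no Open method, the loop uses the nodes themselves as handles (see the OpenRequest case further down), and the select on intr shows the interruption convention described below.

package example

import (
    "os"
    "time"

    "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse"
    "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs"
)

type helloFS struct{}

func (helloFS) Root() (fs.Node, fuse.Error) { return helloDir{}, nil }

type helloDir struct{}

func (helloDir) Attr() fuse.Attr {
    return fuse.Attr{Inode: 1, Mode: os.ModeDir | 0555}
}

func (helloDir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
    if name == "hello" {
        return helloFile{}, nil
    }
    return nil, fuse.ENOENT
}

func (helloDir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
    return []fuse.Dirent{{Inode: 2, Name: "hello", Type: fuse.DT_File}}, nil
}

type helloFile struct{}

func (helloFile) Attr() fuse.Attr {
    return fuse.Attr{Inode: 2, Mode: 0444, Size: uint64(len("hello, world\n"))}
}

func (helloFile) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {
    select {
    case <-time.After(10 * time.Millisecond): // stand-in for slow work
        return []byte("hello, world\n"), nil
    case <-intr:
        // the request was interrupted; fuse.EINTR would be more precise if this
        // revision defines it, EIO keeps the sketch within errors used in this file
        return nil, fuse.EIO
    }
}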
+ +package fs + +import ( + "encoding/binary" + "fmt" + "hash/fnv" + "io" + "reflect" + "strings" + "sync" + "syscall" + "time" +) + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fuseutil" +) + +const ( + attrValidTime = 1 * time.Minute + entryValidTime = 1 * time.Minute +) + +// TODO: FINISH DOCS + +// An Intr is a channel that signals that a request has been interrupted. +// Being able to receive from the channel means the request has been +// interrupted. +type Intr chan struct{} + +func (Intr) String() string { return "fuse.Intr" } + +// An FS is the interface required of a file system. +// +// Other FUSE requests can be handled by implementing methods from the +// FS* interfaces, for example FSIniter. +type FS interface { + // Root is called to obtain the Node for the file system root. + Root() (Node, fuse.Error) +} + +type FSIniter interface { + // Init is called to initialize the FUSE connection. + // It can inspect the request and adjust the response as desired. + // Init must return promptly. + Init(req *fuse.InitRequest, resp *fuse.InitResponse, intr Intr) fuse.Error +} + +type FSStatfser interface { + // Statfs is called to obtain file system metadata. + // It should write that data to resp. + Statfs(req *fuse.StatfsRequest, resp *fuse.StatfsResponse, intr Intr) fuse.Error +} + +type FSDestroyer interface { + // Destroy is called when the file system is shutting down. + // + // Linux only sends this request for block device backed (fuseblk) + // filesystems, to allow them to flush writes to disk before the + // unmount completes. + // + // On normal FUSE filesystems, use Forget of the root Node to + // do actions at unmount time. + Destroy() +} + +type FSInodeGenerator interface { + // GenerateInode is called to pick a dynamic inode number when it + // would otherwise be 0. + // + // Not all filesystems bother tracking inodes, but FUSE requires + // the inode to be set, and fewer duplicates in general makes UNIX + // tools work better. + // + // Operations where the nodes may return 0 inodes include Getattr, + // Setattr and ReadDir. + // + // If FS does not implement FSInodeGenerator, GenerateDynamicInode + // is used. + // + // Implementing this is useful to e.g. constrain the range of + // inode values used for dynamic inodes. + GenerateInode(parentInode uint64, name string) uint64 +} + +// A Node is the interface required of a file or directory. +// See the documentation for type FS for general information +// pertaining to all methods. +// +// Other FUSE requests can be handled by implementing methods from the +// Node* interfaces, for example NodeOpener. +type Node interface { + Attr() fuse.Attr +} + +type NodeGetattrer interface { + // Getattr obtains the standard metadata for the receiver. + // It should store that metadata in resp. + // + // If this method is not implemented, the attributes will be + // generated based on Attr(), with zero values filled in. + Getattr(req *fuse.GetattrRequest, resp *fuse.GetattrResponse, intr Intr) fuse.Error +} + +type NodeSetattrer interface { + // Setattr sets the standard metadata for the receiver. + Setattr(req *fuse.SetattrRequest, resp *fuse.SetattrResponse, intr Intr) fuse.Error +} + +type NodeSymlinker interface { + // Symlink creates a new symbolic link in the receiver, which must be a directory. + // + // TODO is the above true about directories? 
+ Symlink(req *fuse.SymlinkRequest, intr Intr) (Node, fuse.Error) +} + +// This optional request will be called only for symbolic link nodes. +type NodeReadlinker interface { + // Readlink reads a symbolic link. + Readlink(req *fuse.ReadlinkRequest, intr Intr) (string, fuse.Error) +} + +type NodeLinker interface { + // Link creates a new directory entry in the receiver based on an + // existing Node. Receiver must be a directory. + Link(req *fuse.LinkRequest, old Node, intr Intr) (Node, fuse.Error) +} + +type NodeRemover interface { + // Remove removes the entry with the given name from + // the receiver, which must be a directory. The entry to be removed + // may correspond to a file (unlink) or to a directory (rmdir). + Remove(req *fuse.RemoveRequest, intr Intr) fuse.Error +} + +type NodeAccesser interface { + // Access checks whether the calling context has permission for + // the given operations on the receiver. If so, Access should + // return nil. If not, Access should return EPERM. + // + // Note that this call affects the result of the access(2) system + // call but not the open(2) system call. If Access is not + // implemented, the Node behaves as if it always returns nil + // (permission granted), relying on checks in Open instead. + Access(req *fuse.AccessRequest, intr Intr) fuse.Error +} + +type NodeStringLookuper interface { + // Lookup looks up a specific entry in the receiver, + // which must be a directory. Lookup should return a Node + // corresponding to the entry. If the name does not exist in + // the directory, Lookup should return nil, err. + // + // Lookup need not to handle the names "." and "..". + Lookup(name string, intr Intr) (Node, fuse.Error) +} + +type NodeRequestLookuper interface { + // Lookup looks up a specific entry in the receiver. + // See NodeStringLookuper for more. + Lookup(req *fuse.LookupRequest, resp *fuse.LookupResponse, intr Intr) (Node, fuse.Error) +} + +type NodeMkdirer interface { + Mkdir(req *fuse.MkdirRequest, intr Intr) (Node, fuse.Error) +} + +type NodeOpener interface { + // Open opens the receiver. + // XXX note about access. XXX OpenFlags. + // XXX note that the Node may be a file or directory. + Open(req *fuse.OpenRequest, resp *fuse.OpenResponse, intr Intr) (Handle, fuse.Error) +} + +type NodeCreater interface { + // Create creates a new directory entry in the receiver, which + // must be a directory. + Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr Intr) (Node, Handle, fuse.Error) +} + +type NodeForgetter interface { + Forget() +} + +type NodeRenamer interface { + Rename(req *fuse.RenameRequest, newDir Node, intr Intr) fuse.Error +} + +type NodeMknoder interface { + Mknod(req *fuse.MknodRequest, intr Intr) (Node, fuse.Error) +} + +// TODO this should be on Handle not Node +type NodeFsyncer interface { + Fsync(req *fuse.FsyncRequest, intr Intr) fuse.Error +} + +type NodeGetxattrer interface { + // Getxattr gets an extended attribute by the given name from the + // node. + // + // If there is no xattr by that name, returns fuse.ENODATA. This + // will be translated to the platform-specific correct error code + // by the framework. + Getxattr(req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse, intr Intr) fuse.Error +} + +type NodeListxattrer interface { + // Listxattr lists the extended attributes recorded for the node. 
+ Listxattr(req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse, intr Intr) fuse.Error +} + +type NodeSetxattrer interface { + // Setxattr sets an extended attribute with the given name and + // value for the node. + Setxattr(req *fuse.SetxattrRequest, intr Intr) fuse.Error +} + +type NodeRemovexattrer interface { + // Removexattr removes an extended attribute for the name. + // + // If there is no xattr by that name, returns fuse.ENODATA. This + // will be translated to the platform-specific correct error code + // by the framework. + Removexattr(req *fuse.RemovexattrRequest, intr Intr) fuse.Error +} + +var startTime = time.Now() + +func nodeAttr(n Node) (attr fuse.Attr) { + attr = n.Attr() + if attr.Nlink == 0 { + attr.Nlink = 1 + } + if attr.Atime.IsZero() { + attr.Atime = startTime + } + if attr.Mtime.IsZero() { + attr.Mtime = startTime + } + if attr.Ctime.IsZero() { + attr.Ctime = startTime + } + if attr.Crtime.IsZero() { + attr.Crtime = startTime + } + return +} + +// A Handle is the interface required of an opened file or directory. +// See the documentation for type FS for general information +// pertaining to all methods. +// +// Other FUSE requests can be handled by implementing methods from the +// Node* interfaces. The most common to implement are +// HandleReader, HandleReadDirer, and HandleWriter. +// +// TODO implement methods: Getlk, Setlk, Setlkw +type Handle interface { +} + +type HandleFlusher interface { + // Flush is called each time the file or directory is closed. + // Because there can be multiple file descriptors referring to a + // single opened file, Flush can be called multiple times. + Flush(req *fuse.FlushRequest, intr Intr) fuse.Error +} + +type HandleReadAller interface { + ReadAll(intr Intr) ([]byte, fuse.Error) +} + +type HandleReadDirer interface { + ReadDir(intrt Intr) ([]fuse.Dirent, fuse.Error) +} + +type HandleReader interface { + Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr Intr) fuse.Error +} + +type HandleWriter interface { + Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr Intr) fuse.Error +} + +type HandleReleaser interface { + Release(req *fuse.ReleaseRequest, intr Intr) fuse.Error +} + +type Server struct { + FS FS + + // Function to send debug log messages to. If nil, use fuse.Debug. + // Note that changing this or fuse.Debug may not affect existing + // calls to Serve. + Debug func(msg interface{}) +} + +// Serve serves the FUSE connection by making calls to the methods +// of fs and the Nodes and Handles it makes available. It returns only +// when the connection has been closed or an unexpected error occurs. +func (s *Server) Serve(c *fuse.Conn) error { + sc := serveConn{ + fs: s.FS, + debug: s.Debug, + req: map[fuse.RequestID]*serveRequest{}, + dynamicInode: GenerateDynamicInode, + } + if sc.debug == nil { + sc.debug = fuse.Debug + } + if dyn, ok := sc.fs.(FSInodeGenerator); ok { + sc.dynamicInode = dyn.GenerateInode + } + + root, err := sc.fs.Root() + if err != nil { + return fmt.Errorf("cannot obtain root node: %v", syscall.Errno(err.(fuse.Errno)).Error()) + } + sc.node = append(sc.node, nil, &serveNode{inode: 1, node: root, refs: 1}) + sc.handle = append(sc.handle, nil) + + for { + req, err := c.ReadRequest() + if err != nil { + if err == io.EOF { + break + } + return err + } + + go sc.serve(req) + } + return nil +} + +// Serve serves a FUSE connection with the default settings. See +// Server.Serve. 
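An editorial aside on the two entry points (not part of serve.go): fs.Serve below is the default-settings form, while constructing a Server directly additionally exposes the Debug hook. A sketch, assuming filesys implements fs.FS and c came from fuse.Mount:

package example

import (
    "log"

    "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse"
    "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs"
)

func serveWithDebug(c *fuse.Conn, filesys fs.FS) error {
    srv := &fs.Server{
        FS: filesys,
        // Debug gets one value per logged request/response; when nil, fuse.Debug is used.
        Debug: func(msg interface{}) { log.Printf("fuse: %v", msg) },
    }
    return srv.Serve(c)
    // Equivalent with default settings: fs.Serve(c, filesys)
}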
+func Serve(c *fuse.Conn, fs FS) error { + server := Server{ + FS: fs, + } + return server.Serve(c) +} + +type nothing struct{} + +type serveConn struct { + meta sync.Mutex + fs FS + req map[fuse.RequestID]*serveRequest + node []*serveNode + handle []*serveHandle + freeNode []fuse.NodeID + freeHandle []fuse.HandleID + nodeGen uint64 + debug func(msg interface{}) + dynamicInode func(parent uint64, name string) uint64 +} + +type serveRequest struct { + Request fuse.Request + Intr Intr +} + +type serveNode struct { + inode uint64 + node Node + refs uint64 +} + +func (sn *serveNode) attr() (attr fuse.Attr) { + attr = nodeAttr(sn.node) + if attr.Inode == 0 { + attr.Inode = sn.inode + } + return +} + +type serveHandle struct { + handle Handle + readData []byte + nodeID fuse.NodeID +} + +// NodeRef can be embedded in a Node to recognize the same Node being +// returned from multiple Lookup, Create etc calls. +// +// Without this, each Node will get a new NodeID, causing spurious +// cache invalidations, extra lookups and aliasing anomalies. This may +// not matter for a simple, read-only filesystem. +type NodeRef struct { + id fuse.NodeID + generation uint64 +} + +// nodeRef is only ever accessed while holding serveConn.meta +func (n *NodeRef) nodeRef() *NodeRef { + return n +} + +type nodeRef interface { + nodeRef() *NodeRef +} + +func (c *serveConn) saveNode(inode uint64, node Node) (id fuse.NodeID, gen uint64) { + c.meta.Lock() + defer c.meta.Unlock() + + var ref *NodeRef + if nodeRef, ok := node.(nodeRef); ok { + ref = nodeRef.nodeRef() + + if ref.id != 0 { + // dropNode guarantees that NodeRef is zeroed at the same + // time as the NodeID is removed from serveConn.node, as + // guarded by c.meta; this means sn cannot be nil here + sn := c.node[ref.id] + sn.refs++ + return ref.id, ref.generation + } + } + + sn := &serveNode{inode: inode, node: node, refs: 1} + if n := len(c.freeNode); n > 0 { + id = c.freeNode[n-1] + c.freeNode = c.freeNode[:n-1] + c.node[id] = sn + c.nodeGen++ + } else { + id = fuse.NodeID(len(c.node)) + c.node = append(c.node, sn) + } + gen = c.nodeGen + if ref != nil { + ref.id = id + ref.generation = gen + } + return +} + +func (c *serveConn) saveHandle(handle Handle, nodeID fuse.NodeID) (id fuse.HandleID) { + c.meta.Lock() + shandle := &serveHandle{handle: handle, nodeID: nodeID} + if n := len(c.freeHandle); n > 0 { + id = c.freeHandle[n-1] + c.freeHandle = c.freeHandle[:n-1] + c.handle[id] = shandle + } else { + id = fuse.HandleID(len(c.handle)) + c.handle = append(c.handle, shandle) + } + c.meta.Unlock() + return +} + +type nodeRefcountDropBug struct { + N uint64 + Refs uint64 + Node fuse.NodeID +} + +func (n *nodeRefcountDropBug) String() string { + return fmt.Sprintf("bug: trying to drop %d of %d references to %v", n.N, n.Refs, n.Node) +} + +func (c *serveConn) dropNode(id fuse.NodeID, n uint64) (forget bool) { + c.meta.Lock() + defer c.meta.Unlock() + snode := c.node[id] + + if snode == nil { + // this should only happen if refcounts kernel<->us disagree + // *and* two ForgetRequests for the same node race each other; + // this indicates a bug somewhere + c.debug(nodeRefcountDropBug{N: n, Node: id}) + + // we may end up triggering Forget twice, but that's better + // than not even once, and that's the best we can do + return true + } + + if n > snode.refs { + c.debug(nodeRefcountDropBug{N: n, Refs: snode.refs, Node: id}) + n = snode.refs + } + + snode.refs -= n + if snode.refs == 0 { + c.node[id] = nil + if nodeRef, ok := snode.node.(nodeRef); ok { + ref := 
nodeRef.nodeRef() + *ref = NodeRef{} + } + c.freeNode = append(c.freeNode, id) + return true + } + return false +} + +func (c *serveConn) dropHandle(id fuse.HandleID) { + c.meta.Lock() + c.handle[id] = nil + c.freeHandle = append(c.freeHandle, id) + c.meta.Unlock() +} + +type missingHandle struct { + Handle fuse.HandleID + MaxHandle fuse.HandleID +} + +func (m missingHandle) String() string { + return fmt.Sprint("missing handle", m.Handle, m.MaxHandle) +} + +// Returns nil for invalid handles. +func (c *serveConn) getHandle(id fuse.HandleID) (shandle *serveHandle) { + c.meta.Lock() + defer c.meta.Unlock() + if id < fuse.HandleID(len(c.handle)) { + shandle = c.handle[uint(id)] + } + if shandle == nil { + c.debug(missingHandle{ + Handle: id, + MaxHandle: fuse.HandleID(len(c.handle)), + }) + } + return +} + +type request struct { + Op string + Request *fuse.Header + In interface{} `json:",omitempty"` +} + +func (r request) String() string { + return fmt.Sprintf("<- %s", r.In) +} + +type logResponseHeader struct { + ID fuse.RequestID +} + +func (m logResponseHeader) String() string { + return fmt.Sprintf("ID=%#x", m.ID) +} + +type response struct { + Op string + Request logResponseHeader + Out interface{} `json:",omitempty"` + // Errno contains the errno value as a string, for example "EPERM". + Errno string `json:",omitempty"` + // Error may contain a free form error message. + Error string `json:",omitempty"` +} + +func (r response) errstr() string { + s := r.Errno + if r.Error != "" { + // prefix the errno constant to the long form message + s = s + ": " + r.Error + } + return s +} + +func (r response) String() string { + switch { + case r.Errno != "" && r.Out != nil: + return fmt.Sprintf("-> %s error=%s %s", r.Request, r.errstr(), r.Out) + case r.Errno != "": + return fmt.Sprintf("-> %s error=%s", r.Request, r.errstr()) + case r.Out != nil: + // make sure (seemingly) empty values are readable + switch r.Out.(type) { + case string: + return fmt.Sprintf("-> %s %q", r.Request, r.Out) + case []byte: + return fmt.Sprintf("-> %s [% x]", r.Request, r.Out) + default: + return fmt.Sprintf("-> %s %s", r.Request, r.Out) + } + default: + return fmt.Sprintf("-> %s", r.Request) + } +} + +type logMissingNode struct { + MaxNode fuse.NodeID +} + +func opName(req fuse.Request) string { + t := reflect.Indirect(reflect.ValueOf(req)).Type() + s := t.Name() + s = strings.TrimSuffix(s, "Request") + return s +} + +type logLinkRequestOldNodeNotFound struct { + Request *fuse.Header + In *fuse.LinkRequest +} + +func (m *logLinkRequestOldNodeNotFound) String() string { + return fmt.Sprintf("In LinkRequest (request %#x), node %d not found", m.Request.Hdr().ID, m.In.OldNode) +} + +type renameNewDirNodeNotFound struct { + Request *fuse.Header + In *fuse.RenameRequest +} + +func (m *renameNewDirNodeNotFound) String() string { + return fmt.Sprintf("In RenameRequest (request %#x), node %d not found", m.Request.Hdr().ID, m.In.NewDir) +} + +func (c *serveConn) serve(r fuse.Request) { + intr := make(Intr) + req := &serveRequest{Request: r, Intr: intr} + + c.debug(request{ + Op: opName(r), + Request: r.Hdr(), + In: r, + }) + var node Node + var snode *serveNode + c.meta.Lock() + hdr := r.Hdr() + if id := hdr.Node; id != 0 { + if id < fuse.NodeID(len(c.node)) { + snode = c.node[uint(id)] + } + if snode == nil { + c.meta.Unlock() + c.debug(response{ + Op: opName(r), + Request: logResponseHeader{ID: hdr.ID}, + Error: fuse.ESTALE.ErrnoName(), + // this is the only place that sets both Error and + // Out; not sure if i want to do 
that; might get rid + // of len(c.node) things altogether + Out: logMissingNode{ + MaxNode: fuse.NodeID(len(c.node)), + }, + }) + r.RespondError(fuse.ESTALE) + return + } + node = snode.node + } + if c.req[hdr.ID] != nil { + // This happens with OSXFUSE. Assume it's okay and + // that we'll never see an interrupt for this one. + // Otherwise everything wedges. TODO: Report to OSXFUSE? + // + // TODO this might have been because of missing done() calls + intr = nil + } else { + c.req[hdr.ID] = req + } + c.meta.Unlock() + + // Call this before responding. + // After responding is too late: we might get another request + // with the same ID and be very confused. + done := func(resp interface{}) { + msg := response{ + Op: opName(r), + Request: logResponseHeader{ID: hdr.ID}, + } + if err, ok := resp.(error); ok { + msg.Error = err.Error() + if ferr, ok := err.(fuse.ErrorNumber); ok { + errno := ferr.Errno() + msg.Errno = errno.ErrnoName() + if errno == err { + // it's just a fuse.Errno with no extra detail; + // skip the textual message for log readability + msg.Error = "" + } + } else { + msg.Errno = fuse.DefaultErrno.ErrnoName() + } + } else { + msg.Out = resp + } + c.debug(msg) + + c.meta.Lock() + delete(c.req, hdr.ID) + c.meta.Unlock() + } + + switch r := r.(type) { + default: + // Note: To FUSE, ENOSYS means "this server never implements this request." + // It would be inappropriate to return ENOSYS for other operations in this + // switch that might only be unavailable in some contexts, not all. + done(fuse.ENOSYS) + r.RespondError(fuse.ENOSYS) + + // FS operations. + case *fuse.InitRequest: + s := &fuse.InitResponse{ + MaxWrite: 128 * 1024, + Flags: fuse.InitBigWrites, + } + if fs, ok := c.fs.(FSIniter); ok { + if err := fs.Init(r, s, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } + done(s) + r.Respond(s) + + case *fuse.StatfsRequest: + s := &fuse.StatfsResponse{} + if fs, ok := c.fs.(FSStatfser); ok { + if err := fs.Statfs(r, s, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } + done(s) + r.Respond(s) + + // Node operations. + case *fuse.GetattrRequest: + s := &fuse.GetattrResponse{} + if n, ok := node.(NodeGetattrer); ok { + if err := n.Getattr(r, s, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } else { + s.AttrValid = attrValidTime + s.Attr = snode.attr() + } + done(s) + r.Respond(s) + + case *fuse.SetattrRequest: + s := &fuse.SetattrResponse{} + if n, ok := node.(NodeSetattrer); ok { + if err := n.Setattr(r, s, intr); err != nil { + done(err) + r.RespondError(err) + break + } + done(s) + r.Respond(s) + break + } + + if s.AttrValid == 0 { + s.AttrValid = attrValidTime + } + s.Attr = snode.attr() + done(s) + r.Respond(s) + + case *fuse.SymlinkRequest: + s := &fuse.SymlinkResponse{} + n, ok := node.(NodeSymlinker) + if !ok { + done(fuse.EIO) // XXX or EPERM like Mkdir? + r.RespondError(fuse.EIO) + break + } + n2, err := n.Symlink(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + c.saveLookup(&s.LookupResponse, snode, r.NewName, n2) + done(s) + r.Respond(s) + + case *fuse.ReadlinkRequest: + n, ok := node.(NodeReadlinker) + if !ok { + done(fuse.EIO) /// XXX or EPERM? + r.RespondError(fuse.EIO) + break + } + target, err := n.Readlink(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + done(target) + r.Respond(target) + + case *fuse.LinkRequest: + n, ok := node.(NodeLinker) + if !ok { + done(fuse.EIO) /// XXX or EPERM? 
+ r.RespondError(fuse.EIO) + break + } + c.meta.Lock() + var oldNode *serveNode + if int(r.OldNode) < len(c.node) { + oldNode = c.node[r.OldNode] + } + c.meta.Unlock() + if oldNode == nil { + c.debug(logLinkRequestOldNodeNotFound{ + Request: r.Hdr(), + In: r, + }) + done(fuse.EIO) + r.RespondError(fuse.EIO) + break + } + n2, err := n.Link(r, oldNode.node, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + s := &fuse.LookupResponse{} + c.saveLookup(s, snode, r.NewName, n2) + done(s) + r.Respond(s) + + case *fuse.RemoveRequest: + n, ok := node.(NodeRemover) + if !ok { + done(fuse.EIO) /// XXX or EPERM? + r.RespondError(fuse.EIO) + break + } + err := n.Remove(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + done(nil) + r.Respond() + + case *fuse.AccessRequest: + if n, ok := node.(NodeAccesser); ok { + if err := n.Access(r, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } + done(nil) + r.Respond() + + case *fuse.LookupRequest: + var n2 Node + var err fuse.Error + s := &fuse.LookupResponse{} + if n, ok := node.(NodeStringLookuper); ok { + n2, err = n.Lookup(r.Name, intr) + } else if n, ok := node.(NodeRequestLookuper); ok { + n2, err = n.Lookup(r, s, intr) + } else { + done(fuse.ENOENT) + r.RespondError(fuse.ENOENT) + break + } + if err != nil { + done(err) + r.RespondError(err) + break + } + c.saveLookup(s, snode, r.Name, n2) + done(s) + r.Respond(s) + + case *fuse.MkdirRequest: + s := &fuse.MkdirResponse{} + n, ok := node.(NodeMkdirer) + if !ok { + done(fuse.EPERM) + r.RespondError(fuse.EPERM) + break + } + n2, err := n.Mkdir(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + c.saveLookup(&s.LookupResponse, snode, r.Name, n2) + done(s) + r.Respond(s) + + case *fuse.OpenRequest: + s := &fuse.OpenResponse{} + var h2 Handle + if n, ok := node.(NodeOpener); ok { + hh, err := n.Open(r, s, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + h2 = hh + } else { + h2 = node + } + s.Handle = c.saveHandle(h2, hdr.Node) + done(s) + r.Respond(s) + + case *fuse.CreateRequest: + n, ok := node.(NodeCreater) + if !ok { + // If we send back ENOSYS, FUSE will try mknod+open. 
+ done(fuse.EPERM) + r.RespondError(fuse.EPERM) + break + } + s := &fuse.CreateResponse{OpenResponse: fuse.OpenResponse{}} + n2, h2, err := n.Create(r, s, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + c.saveLookup(&s.LookupResponse, snode, r.Name, n2) + s.Handle = c.saveHandle(h2, hdr.Node) + done(s) + r.Respond(s) + + case *fuse.GetxattrRequest: + n, ok := node.(NodeGetxattrer) + if !ok { + done(fuse.ENOTSUP) + r.RespondError(fuse.ENOTSUP) + break + } + s := &fuse.GetxattrResponse{} + err := n.Getxattr(r, s, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { + done(fuse.ERANGE) + r.RespondError(fuse.ERANGE) + break + } + done(s) + r.Respond(s) + + case *fuse.ListxattrRequest: + n, ok := node.(NodeListxattrer) + if !ok { + done(fuse.ENOTSUP) + r.RespondError(fuse.ENOTSUP) + break + } + s := &fuse.ListxattrResponse{} + err := n.Listxattr(r, s, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { + done(fuse.ERANGE) + r.RespondError(fuse.ERANGE) + break + } + done(s) + r.Respond(s) + + case *fuse.SetxattrRequest: + n, ok := node.(NodeSetxattrer) + if !ok { + done(fuse.ENOTSUP) + r.RespondError(fuse.ENOTSUP) + break + } + err := n.Setxattr(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + done(nil) + r.Respond() + + case *fuse.RemovexattrRequest: + n, ok := node.(NodeRemovexattrer) + if !ok { + done(fuse.ENOTSUP) + r.RespondError(fuse.ENOTSUP) + break + } + err := n.Removexattr(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + done(nil) + r.Respond() + + case *fuse.ForgetRequest: + forget := c.dropNode(hdr.Node, r.N) + if forget { + n, ok := node.(NodeForgetter) + if ok { + n.Forget() + } + } + done(nil) + r.Respond() + + // Handle operations. 
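The switch above reaches into the file system only for operations whose optional interfaces (NodeGetattrer, NodeOpener, NodeCreater, and so on) the node actually implements; anything else falls back to a default such as EPERM, ENOTSUP, or, for Open, using the node itself as the handle. A minimal sketch of user code relying on that dispatch; the type is hypothetical and not part of the vendored sources, and it assumes imports of bazil.org/fuse, bazil.org/fuse/fs, and bazil.org/fuse/fuseutil:

```go
// staticFile is an illustrative read-only node. It implements Attr and
// HandleReader only; because it omits NodeOpener, the *fuse.OpenRequest case
// above falls back to using the node itself as the open handle.
type staticFile struct {
	data []byte
}

func (f staticFile) Attr() fuse.Attr {
	return fuse.Attr{Mode: 0444, Size: uint64(len(f.data))}
}

// Read satisfies HandleReader, which the *fuse.ReadRequest case below checks
// for after HandleReadAller.
func (f staticFile) Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr fs.Intr) fuse.Error {
	fuseutil.HandleRead(req, resp, f.data)
	return nil
}
```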
+ case *fuse.ReadRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + done(fuse.ESTALE) + r.RespondError(fuse.ESTALE) + return + } + handle := shandle.handle + + s := &fuse.ReadResponse{Data: make([]byte, 0, r.Size)} + if r.Dir { + if h, ok := handle.(HandleReadDirer); ok { + if shandle.readData == nil { + dirs, err := h.ReadDir(intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + var data []byte + for _, dir := range dirs { + if dir.Inode == 0 { + dir.Inode = c.dynamicInode(snode.inode, dir.Name) + } + data = fuse.AppendDirent(data, dir) + } + shandle.readData = data + } + fuseutil.HandleRead(r, s, shandle.readData) + done(s) + r.Respond(s) + break + } + } else { + if h, ok := handle.(HandleReadAller); ok { + if shandle.readData == nil { + data, err := h.ReadAll(intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + if data == nil { + data = []byte{} + } + shandle.readData = data + } + fuseutil.HandleRead(r, s, shandle.readData) + done(s) + r.Respond(s) + break + } + h, ok := handle.(HandleReader) + if !ok { + fmt.Printf("NO READ FOR %T\n", handle) + done(fuse.EIO) + r.RespondError(fuse.EIO) + break + } + if err := h.Read(r, s, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } + done(s) + r.Respond(s) + + case *fuse.WriteRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + done(fuse.ESTALE) + r.RespondError(fuse.ESTALE) + return + } + + s := &fuse.WriteResponse{} + if h, ok := shandle.handle.(HandleWriter); ok { + if err := h.Write(r, s, intr); err != nil { + done(err) + r.RespondError(err) + break + } + done(s) + r.Respond(s) + break + } + done(fuse.EIO) + r.RespondError(fuse.EIO) + + case *fuse.FlushRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + done(fuse.ESTALE) + r.RespondError(fuse.ESTALE) + return + } + handle := shandle.handle + + if h, ok := handle.(HandleFlusher); ok { + if err := h.Flush(r, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } + done(nil) + r.Respond() + + case *fuse.ReleaseRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + done(fuse.ESTALE) + r.RespondError(fuse.ESTALE) + return + } + handle := shandle.handle + + // No matter what, release the handle. + c.dropHandle(r.Handle) + + if h, ok := handle.(HandleReleaser); ok { + if err := h.Release(r, intr); err != nil { + done(err) + r.RespondError(err) + break + } + } + done(nil) + r.Respond() + + case *fuse.DestroyRequest: + if fs, ok := c.fs.(FSDestroyer); ok { + fs.Destroy() + } + done(nil) + r.Respond() + + case *fuse.RenameRequest: + c.meta.Lock() + var newDirNode *serveNode + if int(r.NewDir) < len(c.node) { + newDirNode = c.node[r.NewDir] + } + c.meta.Unlock() + if newDirNode == nil { + c.debug(renameNewDirNodeNotFound{ + Request: r.Hdr(), + In: r, + }) + done(fuse.EIO) + r.RespondError(fuse.EIO) + break + } + n, ok := node.(NodeRenamer) + if !ok { + done(fuse.EIO) // XXX or EPERM like Mkdir? 
+ r.RespondError(fuse.EIO) + break + } + err := n.Rename(r, newDirNode.node, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + done(nil) + r.Respond() + + case *fuse.MknodRequest: + n, ok := node.(NodeMknoder) + if !ok { + done(fuse.EIO) + r.RespondError(fuse.EIO) + break + } + n2, err := n.Mknod(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + s := &fuse.LookupResponse{} + c.saveLookup(s, snode, r.Name, n2) + done(s) + r.Respond(s) + + case *fuse.FsyncRequest: + n, ok := node.(NodeFsyncer) + if !ok { + done(fuse.EIO) + r.RespondError(fuse.EIO) + break + } + err := n.Fsync(r, intr) + if err != nil { + done(err) + r.RespondError(err) + break + } + done(nil) + r.Respond() + + case *fuse.InterruptRequest: + c.meta.Lock() + ireq := c.req[r.IntrID] + if ireq != nil && ireq.Intr != nil { + close(ireq.Intr) + ireq.Intr = nil + } + c.meta.Unlock() + done(nil) + r.Respond() + + /* case *FsyncdirRequest: + done(ENOSYS) + r.RespondError(ENOSYS) + + case *GetlkRequest, *SetlkRequest, *SetlkwRequest: + done(ENOSYS) + r.RespondError(ENOSYS) + + case *BmapRequest: + done(ENOSYS) + r.RespondError(ENOSYS) + + case *SetvolnameRequest, *GetxtimesRequest, *ExchangeRequest: + done(ENOSYS) + r.RespondError(ENOSYS) + */ + } +} + +func (c *serveConn) saveLookup(s *fuse.LookupResponse, snode *serveNode, elem string, n2 Node) { + s.Attr = nodeAttr(n2) + if s.Attr.Inode == 0 { + s.Attr.Inode = c.dynamicInode(snode.inode, elem) + } + + s.Node, s.Generation = c.saveNode(s.Attr.Inode, n2) + if s.EntryValid == 0 { + s.EntryValid = entryValidTime + } + if s.AttrValid == 0 { + s.AttrValid = attrValidTime + } +} + +// DataHandle returns a read-only Handle that satisfies reads +// using the given data. +func DataHandle(data []byte) Handle { + return &dataHandle{data} +} + +type dataHandle struct { + data []byte +} + +func (d *dataHandle) ReadAll(intr Intr) ([]byte, fuse.Error) { + return d.data, nil +} + +// GenerateDynamicInode returns a dynamic inode. +// +// The parent inode and current entry name are used as the criteria +// for choosing a pseudorandom inode. This makes it likely the same +// entry will get the same inode on multiple runs. 
+func GenerateDynamicInode(parent uint64, name string) uint64 { + h := fnv.New64a() + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], parent) + _, _ = h.Write(buf[:]) + _, _ = h.Write([]byte(name)) + var inode uint64 + for { + inode = h.Sum64() + if inode != 0 { + break + } + // there's a tiny probability that result is zero; change the + // input a little and try again + _, _ = h.Write([]byte{'x'}) + } + return inode +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/fs/serve_test.go b/Godeps/_workspace/src/bazil.org/fuse/fs/serve_test.go new file mode 100644 index 000000000..dde3ce905 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/fs/serve_test.go @@ -0,0 +1,1763 @@ +package fs_test + +import ( + "bytes" + "errors" + "flag" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + "syscall" + "testing" + "time" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil/record" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fuseutil" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/syscallx" +) + +// TO TEST: +// Lookup(*LookupRequest, *LookupResponse) +// Getattr(*GetattrRequest, *GetattrResponse) +// Attr with explicit inode +// Setattr(*SetattrRequest, *SetattrResponse) +// Access(*AccessRequest) +// Open(*OpenRequest, *OpenResponse) +// Write(*WriteRequest, *WriteResponse) +// Flush(*FlushRequest, *FlushResponse) + +func init() { + fstestutil.DebugByDefault() +} + +var childMode bool + +func init() { + flag.BoolVar(&childMode, "fuse.internal.childmode", false, "internal use only") +} + +// childCmd prepares a test function to be run in a subprocess, with +// childMode set to true. Caller must still call Run or Start. +// +// Re-using the test executable as the subprocess is useful because +// now test executables can e.g. be cross-compiled, transferred +// between hosts, and run in settings where the whole Go development +// environment is not installed. +func childCmd(testName string) (*exec.Cmd, error) { + // caller may set cwd, so we can't rely on relative paths + executable, err := filepath.Abs(os.Args[0]) + if err != nil { + return nil, err + } + testName = regexp.QuoteMeta(testName) + cmd := exec.Command(executable, "-test.run=^"+testName+"$", "-fuse.internal.childmode") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd, nil +} + +// childMapFS is an FS with one fixed child named "child". +type childMapFS map[string]fs.Node + +var _ = fs.FS(childMapFS{}) +var _ = fs.Node(childMapFS{}) +var _ = fs.NodeStringLookuper(childMapFS{}) + +func (f childMapFS) Attr() fuse.Attr { + return fuse.Attr{Inode: 1, Mode: os.ModeDir | 0777} +} + +func (f childMapFS) Root() (fs.Node, fuse.Error) { + return f, nil +} + +func (f childMapFS) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + child, ok := f[name] + if !ok { + return nil, fuse.ENOENT + } + return child, nil +} + +// simpleFS is a trivial FS that just implements the Root method. +type simpleFS struct { + node fs.Node +} + +var _ = fs.FS(simpleFS{}) + +func (f simpleFS) Root() (fs.Node, fuse.Error) { + return f.node, nil +} + +// file can be embedded in a struct to make it look like a file. 
+type file struct{} + +func (f file) Attr() fuse.Attr { return fuse.Attr{Mode: 0666} } + +// dir can be embedded in a struct to make it look like a directory. +type dir struct{} + +func (f dir) Attr() fuse.Attr { return fuse.Attr{Mode: os.ModeDir | 0777} } + +// symlink can be embedded in a struct to make it look like a symlink. +type symlink struct { + target string +} + +func (f symlink) Attr() fuse.Attr { return fuse.Attr{Mode: os.ModeSymlink | 0666} } + +// fifo can be embedded in a struct to make it look like a named pipe. +type fifo struct{} + +func (f fifo) Attr() fuse.Attr { return fuse.Attr{Mode: os.ModeNamedPipe | 0666} } + +type badRootFS struct{} + +func (badRootFS) Root() (fs.Node, fuse.Error) { + // pick a really distinct error, to identify it later + return nil, fuse.Errno(syscall.ENAMETOOLONG) +} + +func TestRootErr(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, badRootFS{}) + if err == nil { + // path for synchronous mounts (linux): started out fine, now + // wait for Serve to cycle through + err = <-mnt.Error + // without this, unmount will keep failing with EBUSY; nudge + // kernel into realizing InitResponse will not happen + mnt.Conn.Close() + mnt.Close() + } + + if err == nil { + t.Fatal("expected an error") + } + // TODO this should not be a textual comparison, Serve hides + // details + if err.Error() != "cannot obtain root node: file name too long" { + t.Errorf("Unexpected error: %v", err) + } +} + +type testStatFS struct{} + +func (f testStatFS) Root() (fs.Node, fuse.Error) { + return f, nil +} + +func (f testStatFS) Attr() fuse.Attr { + return fuse.Attr{Inode: 1, Mode: os.ModeDir | 0777} +} + +func (f testStatFS) Statfs(req *fuse.StatfsRequest, resp *fuse.StatfsResponse, int fs.Intr) fuse.Error { + resp.Blocks = 42 + resp.Files = 13 + return nil +} + +func TestStatfs(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, testStatFS{}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + { + var st syscall.Statfs_t + err = syscall.Statfs(mnt.Dir, &st) + if err != nil { + t.Errorf("Statfs failed: %v", err) + } + t.Logf("Statfs got: %#v", st) + if g, e := st.Blocks, uint64(42); g != e { + t.Errorf("got Blocks = %d; want %d", g, e) + } + if g, e := st.Files, uint64(13); g != e { + t.Errorf("got Files = %d; want %d", g, e) + } + } + + { + var st syscall.Statfs_t + f, err := os.Open(mnt.Dir) + if err != nil { + t.Errorf("Open for fstatfs failed: %v", err) + } + defer f.Close() + err = syscall.Fstatfs(int(f.Fd()), &st) + if err != nil { + t.Errorf("Fstatfs failed: %v", err) + } + t.Logf("Fstatfs got: %#v", st) + if g, e := st.Blocks, uint64(42); g != e { + t.Errorf("got Blocks = %d; want %d", g, e) + } + if g, e := st.Files, uint64(13); g != e { + t.Errorf("got Files = %d; want %d", g, e) + } + } + +} + +// Test Stat of root. 
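The tests in this file share one harness: fstestutil.MountedT mounts the file system under test in a temporary directory (failing the test if the mount cannot be set up), mnt.Dir is the mountpoint, and childMapFS exposes a single node under the fixed name "child". A hedged sketch of that shape, reusing the imports already present in this file; the exampleFile type and its data are hypothetical, introduced only for illustration:

```go
// exampleFile is illustrative only and not part of the vendored tests.
type exampleFile struct{}

const exampleData = "example data"

func (exampleFile) Attr() fuse.Attr {
	// Size is reported so the kernel reads past offset 0, as the readAll
	// test type later in this file also does.
	return fuse.Attr{Mode: 0444, Size: uint64(len(exampleData))}
}

func (exampleFile) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {
	return []byte(exampleData), nil
}

func TestExampleHarness(t *testing.T) {
	t.Parallel()
	mnt, err := fstestutil.MountedT(t, childMapFS{"child": exampleFile{}})
	if err != nil {
		t.Fatal(err)
	}
	defer mnt.Close()

	data, err := ioutil.ReadFile(mnt.Dir + "/child")
	if err != nil {
		t.Fatal(err)
	}
	if string(data) != exampleData {
		t.Errorf("read %q, want %q", data, exampleData)
	}
}
```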
+ +type root struct{} + +func (f root) Root() (fs.Node, fuse.Error) { + return f, nil +} + +func (root) Attr() fuse.Attr { + return fuse.Attr{Inode: 1, Mode: os.ModeDir | 0555} +} + +func TestStatRoot(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, root{}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + fi, err := os.Stat(mnt.Dir) + if err != nil { + t.Fatalf("root getattr failed with %v", err) + } + mode := fi.Mode() + if (mode & os.ModeType) != os.ModeDir { + t.Errorf("root is not a directory: %#v", fi) + } + if mode.Perm() != 0555 { + t.Errorf("root has weird access mode: %v", mode.Perm()) + } + switch stat := fi.Sys().(type) { + case *syscall.Stat_t: + if stat.Ino != 1 { + t.Errorf("root has wrong inode: %v", stat.Ino) + } + if stat.Nlink != 1 { + t.Errorf("root has wrong link count: %v", stat.Nlink) + } + if stat.Uid != 0 { + t.Errorf("root has wrong uid: %d", stat.Uid) + } + if stat.Gid != 0 { + t.Errorf("root has wrong gid: %d", stat.Gid) + } + } +} + +// Test Read calling ReadAll. + +type readAll struct{ file } + +const hi = "hello, world" + +func (readAll) Attr() fuse.Attr { + return fuse.Attr{ + Mode: 0666, + Size: uint64(len(hi)), + } +} + +func (readAll) ReadAll(intr fs.Intr) ([]byte, fuse.Error) { + return []byte(hi), nil +} + +func testReadAll(t *testing.T, path string) { + data, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("readAll: %v", err) + } + if string(data) != hi { + t.Errorf("readAll = %q, want %q", data, hi) + } +} + +func TestReadAll(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, childMapFS{"child": readAll{}}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + testReadAll(t, mnt.Dir+"/child") +} + +// Test Read. + +type readWithHandleRead struct{ file } + +func (readWithHandleRead) Attr() fuse.Attr { + return fuse.Attr{ + Mode: 0666, + Size: uint64(len(hi)), + } +} + +func (readWithHandleRead) Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr fs.Intr) fuse.Error { + fuseutil.HandleRead(req, resp, []byte(hi)) + return nil +} + +func TestReadAllWithHandleRead(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, childMapFS{"child": readWithHandleRead{}}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + testReadAll(t, mnt.Dir+"/child") +} + +// Test Release. + +type release struct { + file + record.ReleaseWaiter +} + +func TestRelease(t *testing.T) { + t.Parallel() + r := &release{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": r}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + f, err := os.Open(mnt.Dir + "/child") + if err != nil { + t.Fatal(err) + } + f.Close() + if !r.WaitForRelease(1 * time.Second) { + t.Error("Close did not Release in time") + } +} + +// Test Write calling basic Write, with an fsync thrown in too. 
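The write and fsync tests below lean on the fs/fstestutil/record package: its embeddable recorders implement the corresponding fs methods and remember the last request, so a test drives ordinary file I/O through the mountpoint and then asserts on what was recorded. A small sketch of the pattern; the recordingFile name is hypothetical, while the recorder types and accessors are the ones used by TestWrite immediately below:

```go
// recordingFile is illustrative only.
type recordingFile struct {
	file
	record.Writes // provides Write plus RecordedWriteData()
	record.Fsyncs // provides Fsync plus RecordedFsync()
}

// Typical assertion shape once I/O has been driven through the mountpoint:
//
//	if w.RecordedFsync() == (fuse.FsyncRequest{}) {
//		t.Errorf("never received expected fsync call")
//	}
//	if got := string(w.RecordedWriteData()); got != want {
//		t.Errorf("write = %q, want %q", got, want)
//	}
```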
+ +type write struct { + file + record.Writes + record.Fsyncs +} + +func TestWrite(t *testing.T) { + t.Parallel() + w := &write{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": w}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + f, err := os.Create(mnt.Dir + "/child") + if err != nil { + t.Fatalf("Create: %v", err) + } + defer f.Close() + n, err := f.Write([]byte(hi)) + if err != nil { + t.Fatalf("Write: %v", err) + } + if n != len(hi) { + t.Fatalf("short write; n=%d; hi=%d", n, len(hi)) + } + + err = syscall.Fsync(int(f.Fd())) + if err != nil { + t.Fatalf("Fsync = %v", err) + } + if w.RecordedFsync() == (fuse.FsyncRequest{}) { + t.Errorf("never received expected fsync call") + } + + err = f.Close() + if err != nil { + t.Fatalf("Close: %v", err) + } + + if got := string(w.RecordedWriteData()); got != hi { + t.Errorf("write = %q, want %q", got, hi) + } +} + +// Test Write of a larger buffer. + +type writeLarge struct { + file + record.Writes +} + +func TestWriteLarge(t *testing.T) { + t.Parallel() + w := &write{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": w}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + f, err := os.Create(mnt.Dir + "/child") + if err != nil { + t.Fatalf("Create: %v", err) + } + defer f.Close() + const one = "xyzzyfoo" + large := bytes.Repeat([]byte(one), 8192) + n, err := f.Write(large) + if err != nil { + t.Fatalf("Write: %v", err) + } + if g, e := n, len(large); g != e { + t.Fatalf("short write: %d != %d", g, e) + } + + err = f.Close() + if err != nil { + t.Fatalf("Close: %v", err) + } + + got := w.RecordedWriteData() + if g, e := len(got), len(large); g != e { + t.Errorf("write wrong length: %d != %d", g, e) + } + if g := strings.Replace(string(got), one, "", -1); g != "" { + t.Errorf("write wrong data: expected repeats of %q, also got %q", one, g) + } +} + +// Test Write calling Setattr+Write+Flush. + +type writeTruncateFlush struct { + file + record.Writes + record.Setattrs + record.Flushes +} + +func TestWriteTruncateFlush(t *testing.T) { + t.Parallel() + w := &writeTruncateFlush{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": w}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = ioutil.WriteFile(mnt.Dir+"/child", []byte(hi), 0666) + if err != nil { + t.Fatalf("WriteFile: %v", err) + } + if w.RecordedSetattr() == (fuse.SetattrRequest{}) { + t.Errorf("writeTruncateFlush expected Setattr") + } + if !w.RecordedFlush() { + t.Errorf("writeTruncateFlush expected Setattr") + } + if got := string(w.RecordedWriteData()); got != hi { + t.Errorf("writeTruncateFlush = %q, want %q", got, hi) + } +} + +// Test Mkdir. 
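The Mkdir and Create tests below pin the process umask so the mode that reaches FUSE is reproducible across environments. syscall.Umask installs a new mask and returns the previous one, so a single deferred line both applies 0022 for the test body and restores the old mask when the test returns. A usage sketch; the test name is hypothetical:

```go
func TestWithKnownUmask(t *testing.T) {
	// The inner call switches to umask 0022 and yields the previous mask;
	// the deferred outer call restores that previous mask on return.
	defer syscall.Umask(syscall.Umask(0022))

	// With the mask fixed, os.Mkdir(path, 0771) reliably reaches the file
	// system as 0751 (0771 &^ 0022), which is what TestMkdir below asserts.
}
```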
+ +type mkdir1 struct { + dir + record.Mkdirs +} + +func (f *mkdir1) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error) { + f.Mkdirs.Mkdir(req, intr) + return &mkdir1{}, nil +} + +func TestMkdir(t *testing.T) { + t.Parallel() + f := &mkdir1{} + mnt, err := fstestutil.MountedT(t, simpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + // uniform umask needed to make os.Mkdir's mode into something + // reproducible + defer syscall.Umask(syscall.Umask(0022)) + err = os.Mkdir(mnt.Dir+"/foo", 0771) + if err != nil { + t.Fatalf("mkdir: %v", err) + } + want := fuse.MkdirRequest{Name: "foo", Mode: os.ModeDir | 0751} + if g, e := f.RecordedMkdir(), want; g != e { + t.Errorf("mkdir saw %+v, want %+v", g, e) + } +} + +// Test Create (and fsync) + +type create1file struct { + file + record.Fsyncs +} + +type create1 struct { + dir + f create1file +} + +func (f *create1) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr fs.Intr) (fs.Node, fs.Handle, fuse.Error) { + if req.Name != "foo" { + log.Printf("ERROR create1.Create unexpected name: %q\n", req.Name) + return nil, nil, fuse.EPERM + } + flags := req.Flags + + // OS X does not pass O_TRUNC here, Linux does; as this is a + // Create, that's acceptable + flags &^= fuse.OpenTruncate + + if runtime.GOOS == "linux" { + // Linux <3.7 accidentally leaks O_CLOEXEC through to FUSE; + // avoid spurious test failures + flags &^= fuse.OpenFlags(syscall.O_CLOEXEC) + } + + if g, e := flags, fuse.OpenReadWrite|fuse.OpenCreate; g != e { + log.Printf("ERROR create1.Create unexpected flags: %v != %v\n", g, e) + return nil, nil, fuse.EPERM + } + if g, e := req.Mode, os.FileMode(0644); g != e { + log.Printf("ERROR create1.Create unexpected mode: %v != %v\n", g, e) + return nil, nil, fuse.EPERM + } + return &f.f, &f.f, nil +} + +func TestCreate(t *testing.T) { + t.Parallel() + f := &create1{} + mnt, err := fstestutil.MountedT(t, simpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + // uniform umask needed to make os.Create's 0666 into something + // reproducible + defer syscall.Umask(syscall.Umask(0022)) + ff, err := os.Create(mnt.Dir + "/foo") + if err != nil { + t.Fatalf("create1 WriteFile: %v", err) + } + defer ff.Close() + + err = syscall.Fsync(int(ff.Fd())) + if err != nil { + t.Fatalf("Fsync = %v", err) + } + + if f.f.RecordedFsync() == (fuse.FsyncRequest{}) { + t.Errorf("never received expected fsync call") + } + + ff.Close() +} + +// Test Create + Write + Remove + +type create3file struct { + file + record.Writes +} + +type create3 struct { + dir + f create3file + fooCreated record.MarkRecorder + fooRemoved record.MarkRecorder +} + +func (f *create3) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr fs.Intr) (fs.Node, fs.Handle, fuse.Error) { + if req.Name != "foo" { + log.Printf("ERROR create3.Create unexpected name: %q\n", req.Name) + return nil, nil, fuse.EPERM + } + f.fooCreated.Mark() + return &f.f, &f.f, nil +} + +func (f *create3) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + if f.fooCreated.Recorded() && !f.fooRemoved.Recorded() && name == "foo" { + return &f.f, nil + } + return nil, fuse.ENOENT +} + +func (f *create3) Remove(r *fuse.RemoveRequest, intr fs.Intr) fuse.Error { + if f.fooCreated.Recorded() && !f.fooRemoved.Recorded() && + r.Name == "foo" && !r.Dir { + f.fooRemoved.Mark() + return nil + } + return fuse.ENOENT +} + +func TestCreateWriteRemove(t *testing.T) { + t.Parallel() + f := &create3{} + mnt, err := fstestutil.MountedT(t, simpleFS{f}) + if err 
!= nil { + t.Fatal(err) + } + defer mnt.Close() + + err = ioutil.WriteFile(mnt.Dir+"/foo", []byte(hi), 0666) + if err != nil { + t.Fatalf("create3 WriteFile: %v", err) + } + if got := string(f.f.RecordedWriteData()); got != hi { + t.Fatalf("create3 write = %q, want %q", got, hi) + } + + err = os.Remove(mnt.Dir + "/foo") + if err != nil { + t.Fatalf("Remove: %v", err) + } + err = os.Remove(mnt.Dir + "/foo") + if err == nil { + t.Fatalf("second Remove = nil; want some error") + } +} + +// Test symlink + readlink + +// is a Node that is a symlink to target +type symlink1link struct { + symlink + target string +} + +func (f symlink1link) Readlink(*fuse.ReadlinkRequest, fs.Intr) (string, fuse.Error) { + return f.target, nil +} + +type symlink1 struct { + dir + record.Symlinks +} + +func (f *symlink1) Symlink(req *fuse.SymlinkRequest, intr fs.Intr) (fs.Node, fuse.Error) { + f.Symlinks.Symlink(req, intr) + return symlink1link{target: req.Target}, nil +} + +func TestSymlink(t *testing.T) { + t.Parallel() + f := &symlink1{} + mnt, err := fstestutil.MountedT(t, simpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + const target = "/some-target" + + err = os.Symlink(target, mnt.Dir+"/symlink.file") + if err != nil { + t.Fatalf("os.Symlink: %v", err) + } + + want := fuse.SymlinkRequest{NewName: "symlink.file", Target: target} + if g, e := f.RecordedSymlink(), want; g != e { + t.Errorf("symlink saw %+v, want %+v", g, e) + } + + gotName, err := os.Readlink(mnt.Dir + "/symlink.file") + if err != nil { + t.Fatalf("os.Readlink: %v", err) + } + if gotName != target { + t.Errorf("os.Readlink = %q; want %q", gotName, target) + } +} + +// Test link + +type link1 struct { + dir + record.Links +} + +func (f *link1) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + if name == "old" { + return file{}, nil + } + return nil, fuse.ENOENT +} + +func (f *link1) Link(r *fuse.LinkRequest, old fs.Node, intr fs.Intr) (fs.Node, fuse.Error) { + f.Links.Link(r, old, intr) + return file{}, nil +} + +func TestLink(t *testing.T) { + t.Parallel() + f := &link1{} + mnt, err := fstestutil.MountedT(t, simpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = os.Link(mnt.Dir+"/old", mnt.Dir+"/new") + if err != nil { + t.Fatalf("Link: %v", err) + } + + got := f.RecordedLink() + want := fuse.LinkRequest{ + NewName: "new", + // unpredictable + OldNode: got.OldNode, + } + if g, e := got, want; g != e { + t.Fatalf("link saw %+v, want %+v", g, e) + } +} + +// Test Rename + +type rename1 struct { + dir + renamed record.Counter +} + +func (f *rename1) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + if name == "old" { + return file{}, nil + } + return nil, fuse.ENOENT +} + +func (f *rename1) Rename(r *fuse.RenameRequest, newDir fs.Node, intr fs.Intr) fuse.Error { + if r.OldName == "old" && r.NewName == "new" && newDir == f { + f.renamed.Inc() + return nil + } + return fuse.EIO +} + +func TestRename(t *testing.T) { + t.Parallel() + f := &rename1{} + mnt, err := fstestutil.MountedT(t, simpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = os.Rename(mnt.Dir+"/old", mnt.Dir+"/new") + if err != nil { + t.Fatalf("Rename: %v", err) + } + if g, e := f.renamed.Count(), uint32(1); g != e { + t.Fatalf("expected rename didn't happen: %d != %d", g, e) + } + err = os.Rename(mnt.Dir+"/old2", mnt.Dir+"/new2") + if err == nil { + t.Fatal("expected error on second Rename; got nil") + } +} + +// Test mknod + +type mknod1 struct { + dir + record.Mknods +} + +func (f *mknod1) 
Mknod(r *fuse.MknodRequest, intr fs.Intr) (fs.Node, fuse.Error) { + f.Mknods.Mknod(r, intr) + return fifo{}, nil +} + +func TestMknod(t *testing.T) { + t.Parallel() + if os.Getuid() != 0 { + t.Skip("skipping unless root") + } + + f := &mknod1{} + mnt, err := fstestutil.MountedT(t, simpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + defer syscall.Umask(syscall.Umask(0)) + err = syscall.Mknod(mnt.Dir+"/node", syscall.S_IFIFO|0666, 123) + if err != nil { + t.Fatalf("Mknod: %v", err) + } + + want := fuse.MknodRequest{ + Name: "node", + Mode: os.FileMode(os.ModeNamedPipe | 0666), + Rdev: uint32(123), + } + if runtime.GOOS == "linux" { + // Linux fuse doesn't echo back the rdev if the node + // isn't a device (we're using a FIFO here, as that + // bit is portable.) + want.Rdev = 0 + } + if g, e := f.RecordedMknod(), want; g != e { + t.Fatalf("mknod saw %+v, want %+v", g, e) + } +} + +// Test Read served with DataHandle. + +type dataHandleTest struct { + file +} + +func (dataHandleTest) Attr() fuse.Attr { + return fuse.Attr{ + Mode: 0666, + Size: uint64(len(hi)), + } +} + +func (dataHandleTest) Open(*fuse.OpenRequest, *fuse.OpenResponse, fs.Intr) (fs.Handle, fuse.Error) { + return fs.DataHandle([]byte(hi)), nil +} + +func TestDataHandle(t *testing.T) { + t.Parallel() + f := &dataHandleTest{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + data, err := ioutil.ReadFile(mnt.Dir + "/child") + if err != nil { + t.Errorf("readAll: %v", err) + return + } + if string(data) != hi { + t.Errorf("readAll = %q, want %q", data, hi) + } +} + +// Test interrupt + +type interrupt struct { + file + + // strobes to signal we have a read hanging + hanging chan struct{} +} + +func (interrupt) Attr() fuse.Attr { + return fuse.Attr{ + Mode: 0666, + Size: 1, + } +} + +func (it *interrupt) Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr fs.Intr) fuse.Error { + select { + case it.hanging <- struct{}{}: + default: + } + <-intr + return fuse.EINTR +} + +func TestInterrupt(t *testing.T) { + t.Parallel() + f := &interrupt{} + f.hanging = make(chan struct{}, 1) + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + // start a subprocess that can hang until signaled + cmd := exec.Command("cat", mnt.Dir+"/child") + + err = cmd.Start() + if err != nil { + t.Errorf("interrupt: cannot start cat: %v", err) + return + } + + // try to clean up if child is still alive when returning + defer cmd.Process.Kill() + + // wait till we're sure it's hanging in read + <-f.hanging + + err = cmd.Process.Signal(os.Interrupt) + if err != nil { + t.Errorf("interrupt: cannot interrupt cat: %v", err) + return + } + + p, err := cmd.Process.Wait() + if err != nil { + t.Errorf("interrupt: cat bork: %v", err) + return + } + switch ws := p.Sys().(type) { + case syscall.WaitStatus: + if ws.CoreDump() { + t.Errorf("interrupt: didn't expect cat to dump core: %v", ws) + } + + if ws.Exited() { + t.Errorf("interrupt: didn't expect cat to exit normally: %v", ws) + } + + if !ws.Signaled() { + t.Errorf("interrupt: expected cat to get a signal: %v", ws) + } else { + if ws.Signal() != os.Interrupt { + t.Errorf("interrupt: cat got wrong signal: %v", ws) + } + } + default: + t.Logf("interrupt: this platform has no test coverage") + } +} + +// Test truncate + +type truncate struct { + file + record.Setattrs +} + +func testTruncate(t *testing.T, toSize int64) { + t.Parallel() + f := &truncate{} + mnt, 
err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = os.Truncate(mnt.Dir+"/child", toSize) + if err != nil { + t.Fatalf("Truncate: %v", err) + } + gotr := f.RecordedSetattr() + if gotr == (fuse.SetattrRequest{}) { + t.Fatalf("no recorded SetattrRequest") + } + if g, e := gotr.Size, uint64(toSize); g != e { + t.Errorf("got Size = %q; want %q", g, e) + } + if g, e := gotr.Valid&^fuse.SetattrLockOwner, fuse.SetattrSize; g != e { + t.Errorf("got Valid = %q; want %q", g, e) + } + t.Logf("Got request: %#v", gotr) +} + +func TestTruncate42(t *testing.T) { + testTruncate(t, 42) +} + +func TestTruncate0(t *testing.T) { + testTruncate(t, 0) +} + +// Test ftruncate + +type ftruncate struct { + file + record.Setattrs +} + +func testFtruncate(t *testing.T, toSize int64) { + t.Parallel() + f := &ftruncate{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + { + fil, err := os.OpenFile(mnt.Dir+"/child", os.O_WRONLY, 0666) + if err != nil { + t.Error(err) + return + } + defer fil.Close() + + err = fil.Truncate(toSize) + if err != nil { + t.Fatalf("Ftruncate: %v", err) + } + } + gotr := f.RecordedSetattr() + if gotr == (fuse.SetattrRequest{}) { + t.Fatalf("no recorded SetattrRequest") + } + if g, e := gotr.Size, uint64(toSize); g != e { + t.Errorf("got Size = %q; want %q", g, e) + } + if g, e := gotr.Valid&^fuse.SetattrLockOwner, fuse.SetattrHandle|fuse.SetattrSize; g != e { + t.Errorf("got Valid = %q; want %q", g, e) + } + t.Logf("Got request: %#v", gotr) +} + +func TestFtruncate42(t *testing.T) { + testFtruncate(t, 42) +} + +func TestFtruncate0(t *testing.T) { + testFtruncate(t, 0) +} + +// Test opening existing file truncates + +type truncateWithOpen struct { + file + record.Setattrs +} + +func TestTruncateWithOpen(t *testing.T) { + t.Parallel() + f := &truncateWithOpen{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + fil, err := os.OpenFile(mnt.Dir+"/child", os.O_WRONLY|os.O_TRUNC, 0666) + if err != nil { + t.Error(err) + return + } + fil.Close() + + gotr := f.RecordedSetattr() + if gotr == (fuse.SetattrRequest{}) { + t.Fatalf("no recorded SetattrRequest") + } + if g, e := gotr.Size, uint64(0); g != e { + t.Errorf("got Size = %q; want %q", g, e) + } + // osxfuse sets SetattrHandle here, linux does not + if g, e := gotr.Valid&^(fuse.SetattrLockOwner|fuse.SetattrHandle), fuse.SetattrSize; g != e { + t.Errorf("got Valid = %q; want %q", g, e) + } + t.Logf("Got request: %#v", gotr) +} + +// Test readdir + +type readdir struct { + dir +} + +func (d *readdir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { + return []fuse.Dirent{ + {Name: "one", Inode: 11, Type: fuse.DT_Dir}, + {Name: "three", Inode: 13}, + {Name: "two", Inode: 12, Type: fuse.DT_File}, + }, nil +} + +func TestReadDir(t *testing.T) { + t.Parallel() + f := &readdir{} + mnt, err := fstestutil.MountedT(t, simpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + fil, err := os.Open(mnt.Dir) + if err != nil { + t.Error(err) + return + } + defer fil.Close() + + // go Readdir is just Readdirnames + Lstat, there's no point in + // testing that here; we have no consumption API for the real + // dirent data + names, err := fil.Readdirnames(100) + if err != nil { + t.Error(err) + return + } + + t.Logf("Got readdir: %q", names) + + if len(names) != 3 || + names[0] != "one" || + names[1] != "three" || + names[2] != "two" { + 
t.Errorf(`expected 3 entries of "one", "three", "two", got: %q`, names) + return + } +} + +// Test Chmod. + +type chmod struct { + file + record.Setattrs +} + +func (f *chmod) Setattr(req *fuse.SetattrRequest, resp *fuse.SetattrResponse, intr fs.Intr) fuse.Error { + if !req.Valid.Mode() { + log.Printf("setattr not a chmod: %v", req.Valid) + return fuse.EIO + } + f.Setattrs.Setattr(req, resp, intr) + return nil +} + +func TestChmod(t *testing.T) { + t.Parallel() + f := &chmod{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = os.Chmod(mnt.Dir+"/child", 0764) + if err != nil { + t.Errorf("chmod: %v", err) + return + } + got := f.RecordedSetattr() + if g, e := got.Mode, os.FileMode(0764); g != e { + t.Errorf("wrong mode: %v != %v", g, e) + } +} + +// Test open + +type open struct { + file + record.Opens +} + +func (f *open) Open(req *fuse.OpenRequest, resp *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) { + f.Opens.Open(req, resp, intr) + // pick a really distinct error, to identify it later + return nil, fuse.Errno(syscall.ENAMETOOLONG) + +} + +func TestOpen(t *testing.T) { + t.Parallel() + f := &open{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + // node: mode only matters with O_CREATE + fil, err := os.OpenFile(mnt.Dir+"/child", os.O_WRONLY|os.O_APPEND, 0) + if err == nil { + t.Error("Open err == nil, expected ENAMETOOLONG") + fil.Close() + return + } + + switch err2 := err.(type) { + case *os.PathError: + if err2.Err == syscall.ENAMETOOLONG { + break + } + t.Errorf("unexpected inner error: %#v", err2) + default: + t.Errorf("unexpected error: %v", err) + } + + want := fuse.OpenRequest{Dir: false, Flags: fuse.OpenWriteOnly | fuse.OpenAppend} + if runtime.GOOS == "darwin" { + // osxfuse does not let O_APPEND through at all + // + // https://code.google.com/p/macfuse/issues/detail?id=233 + // https://code.google.com/p/macfuse/issues/detail?id=132 + // https://code.google.com/p/macfuse/issues/detail?id=133 + want.Flags &^= fuse.OpenAppend + } + got := f.RecordedOpen() + + if runtime.GOOS == "linux" { + // Linux <3.7 accidentally leaks O_CLOEXEC through to FUSE; + // avoid spurious test failures + got.Flags &^= fuse.OpenFlags(syscall.O_CLOEXEC) + } + + if g, e := got, want; g != e { + t.Errorf("open saw %v, want %v", g, e) + return + } +} + +// Test Fsync on a dir + +type fsyncDir struct { + dir + record.Fsyncs +} + +func TestFsyncDir(t *testing.T) { + t.Parallel() + f := &fsyncDir{} + mnt, err := fstestutil.MountedT(t, simpleFS{f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + fil, err := os.Open(mnt.Dir) + if err != nil { + t.Errorf("fsyncDir open: %v", err) + return + } + defer fil.Close() + err = fil.Sync() + if err != nil { + t.Errorf("fsyncDir sync: %v", err) + return + } + + got := f.RecordedFsync() + want := fuse.FsyncRequest{ + Flags: 0, + Dir: true, + // unpredictable + Handle: got.Handle, + } + if runtime.GOOS == "darwin" { + // TODO document the meaning of these flags, figure out why + // they differ + want.Flags = 1 + } + if g, e := got, want; g != e { + t.Fatalf("fsyncDir saw %+v, want %+v", g, e) + } +} + +// Test Getxattr + +type getxattr struct { + file + record.Getxattrs +} + +func (f *getxattr) Getxattr(req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse, intr fs.Intr) fuse.Error { + f.Getxattrs.Getxattr(req, resp, intr) + resp.Xattr = []byte("hello, world") + return nil +} + +func 
TestGetxattr(t *testing.T) { + t.Parallel() + f := &getxattr{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + buf := make([]byte, 8192) + n, err := syscallx.Getxattr(mnt.Dir+"/child", "not-there", buf) + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + buf = buf[:n] + if g, e := string(buf), "hello, world"; g != e { + t.Errorf("wrong getxattr content: %#v != %#v", g, e) + } + seen := f.RecordedGetxattr() + if g, e := seen.Name, "not-there"; g != e { + t.Errorf("wrong getxattr name: %#v != %#v", g, e) + } +} + +// Test Getxattr that has no space to return value + +type getxattrTooSmall struct { + file +} + +func (f *getxattrTooSmall) Getxattr(req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse, intr fs.Intr) fuse.Error { + resp.Xattr = []byte("hello, world") + return nil +} + +func TestGetxattrTooSmall(t *testing.T) { + t.Parallel() + f := &getxattrTooSmall{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + buf := make([]byte, 3) + _, err = syscallx.Getxattr(mnt.Dir+"/child", "whatever", buf) + if err == nil { + t.Error("Getxattr = nil; want some error") + } + if err != syscall.ERANGE { + t.Errorf("unexpected error: %v", err) + return + } +} + +// Test Getxattr used to probe result size + +type getxattrSize struct { + file +} + +func (f *getxattrSize) Getxattr(req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse, intr fs.Intr) fuse.Error { + resp.Xattr = []byte("hello, world") + return nil +} + +func TestGetxattrSize(t *testing.T) { + t.Parallel() + f := &getxattrSize{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + n, err := syscallx.Getxattr(mnt.Dir+"/child", "whatever", nil) + if err != nil { + t.Errorf("Getxattr unexpected error: %v", err) + return + } + if g, e := n, len("hello, world"); g != e { + t.Errorf("Getxattr incorrect size: %d != %d", g, e) + } +} + +// Test Listxattr + +type listxattr struct { + file + record.Listxattrs +} + +func (f *listxattr) Listxattr(req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse, intr fs.Intr) fuse.Error { + f.Listxattrs.Listxattr(req, resp, intr) + resp.Append("one", "two") + return nil +} + +func TestListxattr(t *testing.T) { + t.Parallel() + f := &listxattr{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + buf := make([]byte, 8192) + n, err := syscallx.Listxattr(mnt.Dir+"/child", buf) + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + buf = buf[:n] + if g, e := string(buf), "one\x00two\x00"; g != e { + t.Errorf("wrong listxattr content: %#v != %#v", g, e) + } + + want := fuse.ListxattrRequest{ + Size: 8192, + } + if g, e := f.RecordedListxattr(), want; g != e { + t.Fatalf("listxattr saw %+v, want %+v", g, e) + } +} + +// Test Listxattr that has no space to return value + +type listxattrTooSmall struct { + file +} + +func (f *listxattrTooSmall) Listxattr(req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse, intr fs.Intr) fuse.Error { + resp.Xattr = []byte("one\x00two\x00") + return nil +} + +func TestListxattrTooSmall(t *testing.T) { + t.Parallel() + f := &listxattrTooSmall{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + buf := make([]byte, 3) + _, err = syscallx.Listxattr(mnt.Dir+"/child", buf) + if err == nil { + 
t.Error("Listxattr = nil; want some error") + } + if err != syscall.ERANGE { + t.Errorf("unexpected error: %v", err) + return + } +} + +// Test Listxattr used to probe result size + +type listxattrSize struct { + file +} + +func (f *listxattrSize) Listxattr(req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse, intr fs.Intr) fuse.Error { + resp.Xattr = []byte("one\x00two\x00") + return nil +} + +func TestListxattrSize(t *testing.T) { + t.Parallel() + f := &listxattrSize{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + n, err := syscallx.Listxattr(mnt.Dir+"/child", nil) + if err != nil { + t.Errorf("Listxattr unexpected error: %v", err) + return + } + if g, e := n, len("one\x00two\x00"); g != e { + t.Errorf("Getxattr incorrect size: %d != %d", g, e) + } +} + +// Test Setxattr + +type setxattr struct { + file + record.Setxattrs +} + +func TestSetxattr(t *testing.T) { + t.Parallel() + f := &setxattr{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = syscallx.Setxattr(mnt.Dir+"/child", "greeting", []byte("hello, world"), 0) + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + + // fuse.SetxattrRequest contains a byte slice and thus cannot be + // directly compared + got := f.RecordedSetxattr() + + if g, e := got.Name, "greeting"; g != e { + t.Errorf("Setxattr incorrect name: %q != %q", g, e) + } + + if g, e := got.Flags, uint32(0); g != e { + t.Errorf("Setxattr incorrect flags: %d != %d", g, e) + } + + if g, e := string(got.Xattr), "hello, world"; g != e { + t.Errorf("Setxattr incorrect data: %q != %q", g, e) + } +} + +// Test Removexattr + +type removexattr struct { + file + record.Removexattrs +} + +func TestRemovexattr(t *testing.T) { + t.Parallel() + f := &removexattr{} + mnt, err := fstestutil.MountedT(t, childMapFS{"child": f}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + err = syscallx.Removexattr(mnt.Dir+"/child", "greeting") + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + + want := fuse.RemovexattrRequest{Name: "greeting"} + if g, e := f.RecordedRemovexattr(), want; g != e { + t.Errorf("removexattr saw %v, want %v", g, e) + } +} + +// Test default error. + +type defaultErrno struct { + dir +} + +func (f defaultErrno) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + return nil, errors.New("bork") +} + +func TestDefaultErrno(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, simpleFS{defaultErrno{}}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + _, err = os.Stat(mnt.Dir + "/trigger") + if err == nil { + t.Fatalf("expected error") + } + + switch err2 := err.(type) { + case *os.PathError: + if err2.Err == syscall.EIO { + break + } + t.Errorf("unexpected inner error: Err=%v %#v", err2.Err, err2) + default: + t.Errorf("unexpected error: %v", err) + } +} + +// Test custom error. 
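Before the error-handling tests continue below, one note on the xattr tests above: the size bookkeeping they exercise lives in the serve switch shown earlier, not in the file system code. When the request carries a non-zero Size smaller than the value, serve answers ERANGE; when Size is zero the caller is only probing for the required length, as TestGetxattrSize above demonstrates. A hedged sketch of a node that returns its value unconditionally and leaves that handling to the library; the type, attribute name, and value are hypothetical:

```go
// xattrFile is illustrative only.
type xattrFile struct {
	file
}

func (xattrFile) Getxattr(req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse, intr fs.Intr) fuse.Error {
	if req.Name != "user.example" {
		return fuse.ENODATA
	}
	// Always return the full value; the serve loop compares len(resp.Xattr)
	// against req.Size and responds with ERANGE when the buffer is too small.
	resp.Xattr = []byte("example value")
	return nil
}
```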
+ +type customErrNode struct { + dir +} + +type myCustomError struct { + fuse.ErrorNumber +} + +var _ = fuse.ErrorNumber(myCustomError{}) + +func (myCustomError) Error() string { + return "bork" +} + +func (f customErrNode) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + return nil, myCustomError{ + ErrorNumber: fuse.Errno(syscall.ENAMETOOLONG), + } +} + +func TestCustomErrno(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, simpleFS{customErrNode{}}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + _, err = os.Stat(mnt.Dir + "/trigger") + if err == nil { + t.Fatalf("expected error") + } + + switch err2 := err.(type) { + case *os.PathError: + if err2.Err == syscall.ENAMETOOLONG { + break + } + t.Errorf("unexpected inner error: %#v", err2) + default: + t.Errorf("unexpected error: %v", err) + } +} + +// Test Mmap writing + +type inMemoryFile struct { + data []byte +} + +func (f *inMemoryFile) Attr() fuse.Attr { + return fuse.Attr{ + Mode: 0666, + Size: uint64(len(f.data)), + } +} + +func (f *inMemoryFile) Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr fs.Intr) fuse.Error { + fuseutil.HandleRead(req, resp, f.data) + return nil +} + +func (f *inMemoryFile) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error { + resp.Size = copy(f.data[req.Offset:], req.Data) + return nil +} + +type mmap struct { + inMemoryFile + // We don't actually care about whether the fsync happened or not; + // this just lets us force the page cache to send the writes to + // FUSE, so we can reliably verify they came through. + record.Fsyncs +} + +func TestMmap(t *testing.T) { + const size = 16 * 4096 + writes := map[int]byte{ + 10: 'a', + 4096: 'b', + 4097: 'c', + size - 4096: 'd', + size - 1: 'z', + } + + // Run the mmap-using parts of the test in a subprocess, to avoid + // an intentional page fault hanging the whole process (because it + // would need to be served by the same process, and there might + // not be a thread free to do that). Merely bumping GOMAXPROCS is + // not enough to prevent the hangs reliably. + if childMode { + f, err := os.Create("child") + if err != nil { + t.Fatalf("Create: %v", err) + } + defer f.Close() + + data, err := syscall.Mmap(int(f.Fd()), 0, size, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) + if err != nil { + t.Fatalf("Mmap: %v", err) + } + + for i, b := range writes { + data[i] = b + } + + if err := syscallx.Msync(data, syscall.MS_SYNC); err != nil { + t.Fatalf("Msync: %v", err) + } + + if err := syscall.Munmap(data); err != nil { + t.Fatalf("Munmap: %v", err) + } + + if err := f.Sync(); err != nil { + t.Fatalf("Fsync = %v", err) + } + + err = f.Close() + if err != nil { + t.Fatalf("Close: %v", err) + } + + return + } + + w := &mmap{} + w.data = make([]byte, size) + mnt, err := fstestutil.MountedT(t, childMapFS{"child": w}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + child, err := childCmd("TestMmap") + if err != nil { + t.Fatal(err) + } + child.Dir = mnt.Dir + if err := child.Run(); err != nil { + t.Fatal(err) + } + + got := w.data + if g, e := len(got), size; g != e { + t.Fatalf("bad write length: %d != %d", g, e) + } + for i, g := range got { + // default '\x00' for writes[i] is good here + if e := writes[i]; g != e { + t.Errorf("wrong byte at offset %d: %q != %q", i, g, e) + } + } +} + +// Test direct Read. 
+ +type directRead struct{ file } + +// explicitly not defining Attr and setting Size + +func (f directRead) Open(req *fuse.OpenRequest, resp *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) { + // do not allow the kernel to use page cache + resp.Flags |= fuse.OpenDirectIO + return f, nil +} + +func (directRead) Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr fs.Intr) fuse.Error { + fuseutil.HandleRead(req, resp, []byte(hi)) + return nil +} + +func TestDirectRead(t *testing.T) { + t.Parallel() + mnt, err := fstestutil.MountedT(t, childMapFS{"child": directRead{}}) + if err != nil { + t.Fatal(err) + } + defer mnt.Close() + + testReadAll(t, mnt.Dir+"/child") +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/fs/tree.go b/Godeps/_workspace/src/bazil.org/fuse/fs/tree.go new file mode 100644 index 000000000..13d6aa73e --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/fs/tree.go @@ -0,0 +1,96 @@ +// FUSE directory tree, for servers that wish to use it with the service loop. + +package fs + +import ( + "os" + pathpkg "path" + "strings" +) + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse" +) + +// A Tree implements a basic read-only directory tree for FUSE. +// The Nodes contained in it may still be writable. +type Tree struct { + tree +} + +func (t *Tree) Root() (Node, fuse.Error) { + return &t.tree, nil +} + +// Add adds the path to the tree, resolving to the given node. +// If path or a prefix of path has already been added to the tree, +// Add panics. +// +// Add is only safe to call before starting to serve requests. +func (t *Tree) Add(path string, node Node) { + path = pathpkg.Clean("/" + path)[1:] + elems := strings.Split(path, "/") + dir := Node(&t.tree) + for i, elem := range elems { + dt, ok := dir.(*tree) + if !ok { + panic("fuse: Tree.Add for " + strings.Join(elems[:i], "/") + " and " + path) + } + n := dt.lookup(elem) + if n != nil { + if i+1 == len(elems) { + panic("fuse: Tree.Add for " + path + " conflicts with " + elem) + } + dir = n + } else { + if i+1 == len(elems) { + dt.add(elem, node) + } else { + dir = &tree{} + dt.add(elem, dir) + } + } + } +} + +type treeDir struct { + name string + node Node +} + +type tree struct { + dir []treeDir +} + +func (t *tree) lookup(name string) Node { + for _, d := range t.dir { + if d.name == name { + return d.node + } + } + return nil +} + +func (t *tree) add(name string, n Node) { + t.dir = append(t.dir, treeDir{name, n}) +} + +func (t *tree) Attr() fuse.Attr { + return fuse.Attr{Mode: os.ModeDir | 0555} +} + +func (t *tree) Lookup(name string, intr Intr) (Node, fuse.Error) { + n := t.lookup(name) + if n != nil { + return n, nil + } + return nil, fuse.ENOENT +} + +func (t *tree) ReadDir(intr Intr) ([]fuse.Dirent, fuse.Error) { + var out []fuse.Dirent + for _, d := range t.dir { + out = append(out, fuse.Dirent{Name: d.name}) + } + return out, nil +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/fuse.go b/Godeps/_workspace/src/bazil.org/fuse/fuse.go new file mode 100644 index 000000000..7ba9af016 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/fuse.go @@ -0,0 +1,1906 @@ +// See the file LICENSE for copyright and licensing information. +// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c, +// which carries this notice: +// +// The files in this directory are subject to the following license. +// +// The author of this software is Russ Cox. 
+// +// Copyright (c) 2006 Russ Cox +// +// Permission to use, copy, modify, and distribute this software for any +// purpose without fee is hereby granted, provided that this entire notice +// is included in all copies of any software which is or includes a copy +// or modification of this software and in all copies of the supporting +// documentation for such software. +// +// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED +// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY +// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS +// FITNESS FOR ANY PARTICULAR PURPOSE. + +// Package fuse enables writing FUSE file systems on Linux, OS X, and FreeBSD. +// +// On OS X, it requires OSXFUSE (http://osxfuse.github.com/). +// +// There are two approaches to writing a FUSE file system. The first is to speak +// the low-level message protocol, reading from a Conn using ReadRequest and +// writing using the various Respond methods. This approach is closest to +// the actual interaction with the kernel and can be the simplest one in contexts +// such as protocol translators. +// +// Servers of synthesized file systems tend to share common +// bookkeeping abstracted away by the second approach, which is to +// call fs.Serve to serve the FUSE protocol using an implementation of +// the service methods in the interfaces FS* (file system), Node* (file +// or directory), and Handle* (opened file or directory). +// There are a daunting number of such methods that can be written, +// but few are required. +// The specific methods are described in the documentation for those interfaces. +// +// The hellofs subdirectory contains a simple illustration of the fs.Serve approach. +// +// Service Methods +// +// The required and optional methods for the FS, Node, and Handle interfaces +// have the general form +// +// Op(req *OpRequest, resp *OpResponse, intr Intr) Error +// +// where Op is the name of a FUSE operation. Op reads request parameters +// from req and writes results to resp. An operation whose only result is +// the error result omits the resp parameter. Multiple goroutines may call +// service methods simultaneously; the methods being called are responsible +// for appropriate synchronization. +// +// Interrupted Operations +// +// In some file systems, some operations +// may take an undetermined amount of time. For example, a Read waiting for +// a network message or a matching Write might wait indefinitely. If the request +// is cancelled and no longer needed, the package will close intr, a chan struct{}. +// Blocking operations should select on a receive from intr and attempt to +// abort the operation early if the receive succeeds (meaning the channel is closed). +// To indicate that the operation failed because it was aborted, return fuse.EINTR. +// +// If an operation does not block for an indefinite amount of time, the intr parameter +// can be ignored. +// +// Authentication +// +// All requests types embed a Header, meaning that the method can inspect +// req.Pid, req.Uid, and req.Gid as necessary to implement permission checking. +// Alternately, XXX. +// +// Mount Options +// +// XXX +// +package fuse + +// BUG(rsc): The mount code for FreeBSD has not been written yet. + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "sync" + "syscall" + "time" + "unsafe" +) + +// A Conn represents a connection to a mounted FUSE file system. 
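The package documentation above points to the hellofs subdirectory for an illustration of the fs.Serve approach. For reference, a condensed sketch of that shape follows; the type names, greeting, and mountpoint are illustrative, and the import paths are the upstream ones (inside this vendored tree they carry the Godeps/_workspace prefix, as in fs/serve_test.go):

```go
package main

import (
	"log"
	"os"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
)

// helloFS serves a single read-only file named "hello".
type helloFS struct{}

func (helloFS) Root() (fs.Node, fuse.Error) { return helloDir{}, nil }

type helloDir struct{}

func (helloDir) Attr() fuse.Attr {
	return fuse.Attr{Inode: 1, Mode: os.ModeDir | 0555}
}

func (helloDir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
	if name == "hello" {
		return helloFile{}, nil
	}
	return nil, fuse.ENOENT
}

func (helloDir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
	return []fuse.Dirent{{Inode: 2, Name: "hello", Type: fuse.DT_File}}, nil
}

type helloFile struct{}

const greeting = "hello, world\n"

func (helloFile) Attr() fuse.Attr {
	return fuse.Attr{Inode: 2, Mode: 0444, Size: uint64(len(greeting))}
}

func (helloFile) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {
	return []byte(greeting), nil
}

func main() {
	c, err := fuse.Mount("/mnt/hello") // mountpoint chosen for illustration
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	if err := fs.Serve(c, helloFS{}); err != nil {
		log.Fatal(err)
	}

	// Serve has returned; surface any error reported by the mount itself.
	<-c.Ready
	if err := c.MountError; err != nil {
		log.Fatal(err)
	}
}
```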
+type Conn struct { + // Ready is closed when the mount is complete or has failed. + Ready <-chan struct{} + + // MountError stores any error from the mount process. Only valid + // after Ready is closed. + MountError error + + // File handle for kernel communication. Only safe to access if + // rio or wio is held. + dev *os.File + buf []byte + wio sync.Mutex + rio sync.RWMutex +} + +// Mount mounts a new FUSE connection on the named directory +// and returns a connection for reading and writing FUSE messages. +// +// After a successful return, caller must call Close to free +// resources. +// +// Even on successful return, the new mount is not guaranteed to be +// visible until after Conn.Ready is closed. See Conn.MountError for +// possible errors. Incoming requests on Conn must be served to make +// progress. +func Mount(dir string) (*Conn, error) { + // TODO(rsc): mount options (...string?) + ready := make(chan struct{}, 1) + c := &Conn{ + Ready: ready, + } + f, err := mount(dir, ready, &c.MountError) + if err != nil { + return nil, err + } + c.dev = f + return c, nil +} + +// A Request represents a single FUSE request received from the kernel. +// Use a type switch to determine the specific kind. +// A request of unrecognized type will have concrete type *Header. +type Request interface { + // Hdr returns the Header associated with this request. + Hdr() *Header + + // RespondError responds to the request with the given error. + RespondError(Error) + + String() string +} + +// A RequestID identifies an active FUSE request. +type RequestID uint64 + +// A NodeID is a number identifying a directory or file. +// It must be unique among IDs returned in LookupResponses +// that have not yet been forgotten by ForgetRequests. +type NodeID uint64 + +// A HandleID is a number identifying an open directory or file. +// It only needs to be unique while the directory or file is open. +type HandleID uint64 + +// The RootID identifies the root directory of a FUSE file system. +const RootID NodeID = rootID + +// A Header describes the basic information sent in every request. +type Header struct { + Conn *Conn `json:"-"` // connection this request was received on + ID RequestID // unique ID for request + Node NodeID // file or directory the request is about + Uid uint32 // user ID of process making request + Gid uint32 // group ID of process making request + Pid uint32 // process ID of process making request +} + +func (h *Header) String() string { + return fmt.Sprintf("ID=%#x Node=%#x Uid=%d Gid=%d Pid=%d", h.ID, h.Node, h.Uid, h.Gid, h.Pid) +} + +func (h *Header) Hdr() *Header { + return h +} + +// An Error is a FUSE error. +// +// Errors messages will be visible in the debug log as part of the +// response. +// +// The FUSE interface can only communicate POSIX errno error numbers +// to file system clients, the message is not visible to file system +// clients. The returned error can implement ErrorNumber to control +// the errno returned. Without ErrorNumber, a generic errno (EIO) is +// returned. +type Error error + +// An ErrorNumber is an error with a specific error number. +// +// Operations may return an error value that implements ErrorNumber to +// control what specific error number (errno) to return. +type ErrorNumber interface { + // Errno returns the the error number (errno) for this error. + Errno() Errno +} + +const ( + // ENOSYS indicates that the call is not supported. + ENOSYS = Errno(syscall.ENOSYS) + + // ESTALE is used by Serve to respond to violations of the FUSE protocol. 
+ ESTALE = Errno(syscall.ESTALE) + + ENOENT = Errno(syscall.ENOENT) + EIO = Errno(syscall.EIO) + EPERM = Errno(syscall.EPERM) + + // EINTR indicates request was interrupted by an InterruptRequest. + // See also fs.Intr. + EINTR = Errno(syscall.EINTR) + + ENODATA = Errno(syscall.ENODATA) + ERANGE = Errno(syscall.ERANGE) + ENOTSUP = Errno(syscall.ENOTSUP) + EEXIST = Errno(syscall.EEXIST) +) + +// DefaultErrno is the errno used when error returned does not +// implement ErrorNumber. +const DefaultErrno = EIO + +var errnoNames = map[Errno]string{ + ENOSYS: "ENOSYS", + ESTALE: "ESTALE", + ENOENT: "ENOENT", + EIO: "EIO", + EPERM: "EPERM", + EINTR: "EINTR", + ENODATA: "ENODATA", + EEXIST: "EEXIST", +} + +// Errno implements Error and ErrorNumber using a syscall.Errno. +type Errno syscall.Errno + +var _ = ErrorNumber(Errno(0)) +var _ = Error(Errno(0)) +var _ = error(Errno(0)) + +func (e Errno) Errno() Errno { + return e +} + +func (e Errno) String() string { + return syscall.Errno(e).Error() +} + +func (e Errno) Error() string { + return syscall.Errno(e).Error() +} + +// ErrnoName returns the short non-numeric identifier for this errno. +// For example, "EIO". +func (e Errno) ErrnoName() string { + s := errnoNames[e] + if s == "" { + s = fmt.Sprint(e.Errno()) + } + return s +} + +func (e Errno) MarshalText() ([]byte, error) { + s := e.ErrnoName() + return []byte(s), nil +} + +func (h *Header) RespondError(err Error) { + errno := DefaultErrno + if ferr, ok := err.(ErrorNumber); ok { + errno = ferr.Errno() + } + // FUSE uses negative errors! + // TODO: File bug report against OSXFUSE: positive error causes kernel panic. + out := &outHeader{Error: -int32(errno), Unique: uint64(h.ID)} + h.Conn.respond(out, unsafe.Sizeof(*out)) +} + +// Maximum file write size we are prepared to receive from the kernel. +const maxWrite = 128 * 1024 + +// All requests read from the kernel, without data, are shorter than +// this. +var maxRequestSize = syscall.Getpagesize() +var bufSize = maxRequestSize + maxWrite + +// a message represents the bytes of a single FUSE message +type message struct { + conn *Conn + buf []byte // all bytes + hdr *inHeader // header + off int // offset for reading additional fields +} + +func newMessage(c *Conn) *message { + m := &message{conn: c, buf: make([]byte, bufSize)} + m.hdr = (*inHeader)(unsafe.Pointer(&m.buf[0])) + return m +} + +func (m *message) len() uintptr { + return uintptr(len(m.buf) - m.off) +} + +func (m *message) data() unsafe.Pointer { + var p unsafe.Pointer + if m.off < len(m.buf) { + p = unsafe.Pointer(&m.buf[m.off]) + } + return p +} + +func (m *message) bytes() []byte { + return m.buf[m.off:] +} + +func (m *message) Header() Header { + h := m.hdr + return Header{Conn: m.conn, ID: RequestID(h.Unique), Node: NodeID(h.Nodeid), Uid: h.Uid, Gid: h.Gid, Pid: h.Pid} +} + +// fileMode returns a Go os.FileMode from a Unix mode. 
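+// For example, fileMode(syscall.S_IFDIR|0755) yields os.ModeDir|0755, and a
+// type it does not recognize falls back to os.ModeDevice in the default case.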
+func fileMode(unixMode uint32) os.FileMode { + mode := os.FileMode(unixMode & 0777) + switch unixMode & syscall.S_IFMT { + case syscall.S_IFREG: + // nothing + case syscall.S_IFDIR: + mode |= os.ModeDir + case syscall.S_IFCHR: + mode |= os.ModeCharDevice | os.ModeDevice + case syscall.S_IFBLK: + mode |= os.ModeDevice + case syscall.S_IFIFO: + mode |= os.ModeNamedPipe + case syscall.S_IFLNK: + mode |= os.ModeSymlink + case syscall.S_IFSOCK: + mode |= os.ModeSocket + default: + // no idea + mode |= os.ModeDevice + } + if unixMode&syscall.S_ISUID != 0 { + mode |= os.ModeSetuid + } + if unixMode&syscall.S_ISGID != 0 { + mode |= os.ModeSetgid + } + return mode +} + +type noOpcode struct { + Opcode uint32 +} + +func (m noOpcode) String() string { + return fmt.Sprintf("No opcode %v", m.Opcode) +} + +type malformedMessage struct { +} + +func (malformedMessage) String() string { + return "malformed message" +} + +// Close closes the FUSE connection. +func (c *Conn) Close() error { + c.wio.Lock() + defer c.wio.Unlock() + c.rio.Lock() + defer c.rio.Unlock() + return c.dev.Close() +} + +// caller must hold wio or rio +func (c *Conn) fd() int { + return int(c.dev.Fd()) +} + +func (c *Conn) ReadRequest() (Request, error) { + // TODO: Some kind of buffer reuse. + m := newMessage(c) +loop: + c.rio.RLock() + n, err := syscall.Read(c.fd(), m.buf) + c.rio.RUnlock() + if err == syscall.EINTR { + // OSXFUSE sends EINTR to userspace when a request interrupt + // completed before it got sent to userspace? + goto loop + } + if err != nil && err != syscall.ENODEV { + return nil, err + } + if n <= 0 { + return nil, io.EOF + } + m.buf = m.buf[:n] + + if n < inHeaderSize { + return nil, errors.New("fuse: message too short") + } + + // FreeBSD FUSE sends a short length in the header + // for FUSE_INIT even though the actual read length is correct. + if n == inHeaderSize+initInSize && m.hdr.Opcode == opInit && m.hdr.Len < uint32(n) { + m.hdr.Len = uint32(n) + } + + // OSXFUSE sometimes sends the wrong m.hdr.Len in a FUSE_WRITE message. + if m.hdr.Len < uint32(n) && m.hdr.Len >= uint32(unsafe.Sizeof(writeIn{})) && m.hdr.Opcode == opWrite { + m.hdr.Len = uint32(n) + } + + if m.hdr.Len != uint32(n) { + return nil, fmt.Errorf("fuse: read %d opcode %d but expected %d", n, m.hdr.Opcode, m.hdr.Len) + } + + m.off = inHeaderSize + + // Convert to data structures. + // Do not trust kernel to hand us well-formed data. 
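+	// Unrecognized opcodes fall through to the unrecognized label below and
+	// come back to the caller as a bare *Header; short or malformed payloads
+	// jump to the corrupt label and yield an error instead.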
+ var req Request + switch m.hdr.Opcode { + default: + Debug(noOpcode{Opcode: m.hdr.Opcode}) + goto unrecognized + + case opLookup: + buf := m.bytes() + n := len(buf) + if n == 0 || buf[n-1] != '\x00' { + goto corrupt + } + req = &LookupRequest{ + Header: m.Header(), + Name: string(buf[:n-1]), + } + + case opForget: + in := (*forgetIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &ForgetRequest{ + Header: m.Header(), + N: in.Nlookup, + } + + case opGetattr: + req = &GetattrRequest{ + Header: m.Header(), + } + + case opSetattr: + in := (*setattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &SetattrRequest{ + Header: m.Header(), + Valid: SetattrValid(in.Valid), + Handle: HandleID(in.Fh), + Size: in.Size, + Atime: time.Unix(int64(in.Atime), int64(in.AtimeNsec)), + Mtime: time.Unix(int64(in.Mtime), int64(in.MtimeNsec)), + Mode: fileMode(in.Mode), + Uid: in.Uid, + Gid: in.Gid, + Bkuptime: in.BkupTime(), + Chgtime: in.Chgtime(), + Flags: in.Flags(), + } + + case opReadlink: + if len(m.bytes()) > 0 { + goto corrupt + } + req = &ReadlinkRequest{ + Header: m.Header(), + } + + case opSymlink: + // m.bytes() is "newName\0target\0" + names := m.bytes() + if len(names) == 0 || names[len(names)-1] != 0 { + goto corrupt + } + i := bytes.IndexByte(names, '\x00') + if i < 0 { + goto corrupt + } + newName, target := names[0:i], names[i+1:len(names)-1] + req = &SymlinkRequest{ + Header: m.Header(), + NewName: string(newName), + Target: string(target), + } + + case opLink: + in := (*linkIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + newName := m.bytes()[unsafe.Sizeof(*in):] + if len(newName) < 2 || newName[len(newName)-1] != 0 { + goto corrupt + } + newName = newName[:len(newName)-1] + req = &LinkRequest{ + Header: m.Header(), + OldNode: NodeID(in.Oldnodeid), + NewName: string(newName), + } + + case opMknod: + in := (*mknodIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + name := m.bytes()[unsafe.Sizeof(*in):] + if len(name) < 2 || name[len(name)-1] != '\x00' { + goto corrupt + } + name = name[:len(name)-1] + req = &MknodRequest{ + Header: m.Header(), + Mode: fileMode(in.Mode), + Rdev: in.Rdev, + Name: string(name), + } + + case opMkdir: + in := (*mkdirIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + name := m.bytes()[unsafe.Sizeof(*in):] + i := bytes.IndexByte(name, '\x00') + if i < 0 { + goto corrupt + } + req = &MkdirRequest{ + Header: m.Header(), + Name: string(name[:i]), + // observed on Linux: mkdirIn.Mode & syscall.S_IFMT == 0, + // and this causes fileMode to go into it's "no idea" + // code branch; enforce type to directory + Mode: fileMode((in.Mode &^ syscall.S_IFMT) | syscall.S_IFDIR), + } + + case opUnlink, opRmdir: + buf := m.bytes() + n := len(buf) + if n == 0 || buf[n-1] != '\x00' { + goto corrupt + } + req = &RemoveRequest{ + Header: m.Header(), + Name: string(buf[:n-1]), + Dir: m.hdr.Opcode == opRmdir, + } + + case opRename: + in := (*renameIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + newDirNodeID := NodeID(in.Newdir) + oldNew := m.bytes()[unsafe.Sizeof(*in):] + // oldNew should be "old\x00new\x00" + if len(oldNew) < 4 { + goto corrupt + } + if oldNew[len(oldNew)-1] != '\x00' { + goto corrupt + } + i := bytes.IndexByte(oldNew, '\x00') + if i < 0 { + goto corrupt + } + oldName, newName := string(oldNew[:i]), string(oldNew[i+1:len(oldNew)-1]) + req = &RenameRequest{ + Header: m.Header(), + NewDir: newDirNodeID, + OldName: oldName, + NewName: 
newName, + } + + case opOpendir, opOpen: + in := (*openIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &OpenRequest{ + Header: m.Header(), + Dir: m.hdr.Opcode == opOpendir, + Flags: openFlags(in.Flags), + } + + case opRead, opReaddir: + in := (*readIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &ReadRequest{ + Header: m.Header(), + Dir: m.hdr.Opcode == opReaddir, + Handle: HandleID(in.Fh), + Offset: int64(in.Offset), + Size: int(in.Size), + } + + case opWrite: + in := (*writeIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + r := &WriteRequest{ + Header: m.Header(), + Handle: HandleID(in.Fh), + Offset: int64(in.Offset), + Flags: WriteFlags(in.WriteFlags), + } + buf := m.bytes()[unsafe.Sizeof(*in):] + if uint32(len(buf)) < in.Size { + goto corrupt + } + r.Data = buf + req = r + + case opStatfs: + req = &StatfsRequest{ + Header: m.Header(), + } + + case opRelease, opReleasedir: + in := (*releaseIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &ReleaseRequest{ + Header: m.Header(), + Dir: m.hdr.Opcode == opReleasedir, + Handle: HandleID(in.Fh), + Flags: openFlags(in.Flags), + ReleaseFlags: ReleaseFlags(in.ReleaseFlags), + LockOwner: in.LockOwner, + } + + case opFsync, opFsyncdir: + in := (*fsyncIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &FsyncRequest{ + Dir: m.hdr.Opcode == opFsyncdir, + Header: m.Header(), + Handle: HandleID(in.Fh), + Flags: in.FsyncFlags, + } + + case opSetxattr: + in := (*setxattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + m.off += int(unsafe.Sizeof(*in)) + name := m.bytes() + i := bytes.IndexByte(name, '\x00') + if i < 0 { + goto corrupt + } + xattr := name[i+1:] + if uint32(len(xattr)) < in.Size { + goto corrupt + } + xattr = xattr[:in.Size] + req = &SetxattrRequest{ + Header: m.Header(), + Flags: in.Flags, + Position: in.position(), + Name: string(name[:i]), + Xattr: xattr, + } + + case opGetxattr: + in := (*getxattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + name := m.bytes()[unsafe.Sizeof(*in):] + i := bytes.IndexByte(name, '\x00') + if i < 0 { + goto corrupt + } + req = &GetxattrRequest{ + Header: m.Header(), + Name: string(name[:i]), + Size: in.Size, + Position: in.position(), + } + + case opListxattr: + in := (*getxattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &ListxattrRequest{ + Header: m.Header(), + Size: in.Size, + Position: in.position(), + } + + case opRemovexattr: + buf := m.bytes() + n := len(buf) + if n == 0 || buf[n-1] != '\x00' { + goto corrupt + } + req = &RemovexattrRequest{ + Header: m.Header(), + Name: string(buf[:n-1]), + } + + case opFlush: + in := (*flushIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &FlushRequest{ + Header: m.Header(), + Handle: HandleID(in.Fh), + Flags: in.FlushFlags, + LockOwner: in.LockOwner, + } + + case opInit: + in := (*initIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &InitRequest{ + Header: m.Header(), + Major: in.Major, + Minor: in.Minor, + MaxReadahead: in.MaxReadahead, + Flags: InitFlags(in.Flags), + } + + case opGetlk: + panic("opGetlk") + case opSetlk: + panic("opSetlk") + case opSetlkw: + panic("opSetlkw") + + case opAccess: + in := (*accessIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &AccessRequest{ + Header: m.Header(), + Mask: in.Mask, + } + + case opCreate: + in := (*createIn)(m.data()) + if 
m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + name := m.bytes()[unsafe.Sizeof(*in):] + i := bytes.IndexByte(name, '\x00') + if i < 0 { + goto corrupt + } + req = &CreateRequest{ + Header: m.Header(), + Flags: openFlags(in.Flags), + Mode: fileMode(in.Mode), + Name: string(name[:i]), + } + + case opInterrupt: + in := (*interruptIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &InterruptRequest{ + Header: m.Header(), + IntrID: RequestID(in.Unique), + } + + case opBmap: + panic("opBmap") + + case opDestroy: + req = &DestroyRequest{ + Header: m.Header(), + } + + // OS X + case opSetvolname: + panic("opSetvolname") + case opGetxtimes: + panic("opGetxtimes") + case opExchange: + panic("opExchange") + } + + return req, nil + +corrupt: + Debug(malformedMessage{}) + return nil, fmt.Errorf("fuse: malformed message") + +unrecognized: + // Unrecognized message. + // Assume higher-level code will send a "no idea what you mean" error. + h := m.Header() + return &h, nil +} + +type bugShortKernelWrite struct { + Written int64 + Length int64 + Error string + Stack string +} + +func (b bugShortKernelWrite) String() string { + return fmt.Sprintf("short kernel write: written=%d/%d error=%q stack=\n%s", b.Written, b.Length, b.Error, b.Stack) +} + +// safe to call even with nil error +func errorString(err error) string { + if err == nil { + return "" + } + return err.Error() +} + +func (c *Conn) respond(out *outHeader, n uintptr) { + c.wio.Lock() + defer c.wio.Unlock() + out.Len = uint32(n) + msg := (*[1 << 30]byte)(unsafe.Pointer(out))[:n] + nn, err := syscall.Write(c.fd(), msg) + if nn != len(msg) || err != nil { + Debug(bugShortKernelWrite{ + Written: int64(nn), + Length: int64(len(msg)), + Error: errorString(err), + Stack: stack(), + }) + } +} + +func (c *Conn) respondData(out *outHeader, n uintptr, data []byte) { + c.wio.Lock() + defer c.wio.Unlock() + // TODO: use writev + out.Len = uint32(n + uintptr(len(data))) + msg := make([]byte, out.Len) + copy(msg, (*[1 << 30]byte)(unsafe.Pointer(out))[:n]) + copy(msg[n:], data) + syscall.Write(c.fd(), msg) +} + +// An InitRequest is the first request sent on a FUSE file system. +type InitRequest struct { + Header `json:"-"` + Major uint32 + Minor uint32 + // Maximum readahead in bytes that the kernel plans to use. + MaxReadahead uint32 + Flags InitFlags +} + +func (r *InitRequest) String() string { + return fmt.Sprintf("Init [%s] %d.%d ra=%d fl=%v", &r.Header, r.Major, r.Minor, r.MaxReadahead, r.Flags) +} + +// An InitResponse is the response to an InitRequest. +type InitResponse struct { + // Maximum readahead in bytes that the kernel can use. Ignored if + // greater than InitRequest.MaxReadahead. + MaxReadahead uint32 + Flags InitFlags + // Maximum size of a single write operation. + // Linux enforces a minimum of 4 KiB. + MaxWrite uint32 +} + +func (r *InitResponse) String() string { + return fmt.Sprintf("Init %+v", *r) +} + +// Respond replies to the request with the given response. +func (r *InitRequest) Respond(resp *InitResponse) { + out := &initOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Major: kernelVersion, + Minor: kernelMinorVersion, + MaxReadahead: resp.MaxReadahead, + Flags: uint32(resp.Flags), + MaxWrite: resp.MaxWrite, + } + // MaxWrite larger than our receive buffer would just lead to + // errors on large writes. + if out.MaxWrite > maxWrite { + out.MaxWrite = maxWrite + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A StatfsRequest requests information about the mounted file system. 
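+//
+// A server that does not track real usage statistics can reply with mostly
+// zero values, for example (sketch; the block size shown is arbitrary):
+//
+//	r.Respond(&fuse.StatfsResponse{Bsize: 4096})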
+type StatfsRequest struct { + Header `json:"-"` +} + +func (r *StatfsRequest) String() string { + return fmt.Sprintf("Statfs [%s]\n", &r.Header) +} + +// Respond replies to the request with the given response. +func (r *StatfsRequest) Respond(resp *StatfsResponse) { + out := &statfsOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + St: kstatfs{ + Blocks: resp.Blocks, + Bfree: resp.Bfree, + Bavail: resp.Bavail, + Files: resp.Files, + Bsize: resp.Bsize, + Namelen: resp.Namelen, + Frsize: resp.Frsize, + }, + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A StatfsResponse is the response to a StatfsRequest. +type StatfsResponse struct { + Blocks uint64 // Total data blocks in file system. + Bfree uint64 // Free blocks in file system. + Bavail uint64 // Free blocks in file system if you're not root. + Files uint64 // Total files in file system. + Ffree uint64 // Free files in file system. + Bsize uint32 // Block size + Namelen uint32 // Maximum file name length? + Frsize uint32 // Fragment size, smallest addressable data size in the file system. +} + +func (r *StatfsResponse) String() string { + return fmt.Sprintf("Statfs %+v", *r) +} + +// An AccessRequest asks whether the file can be accessed +// for the purpose specified by the mask. +type AccessRequest struct { + Header `json:"-"` + Mask uint32 +} + +func (r *AccessRequest) String() string { + return fmt.Sprintf("Access [%s] mask=%#x", &r.Header, r.Mask) +} + +// Respond replies to the request indicating that access is allowed. +// To deny access, use RespondError. +func (r *AccessRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.Conn.respond(out, unsafe.Sizeof(*out)) +} + +// An Attr is the metadata for a single file or directory. +type Attr struct { + Inode uint64 // inode number + Size uint64 // size in bytes + Blocks uint64 // size in blocks + Atime time.Time // time of last access + Mtime time.Time // time of last modification + Ctime time.Time // time of last inode change + Crtime time.Time // time of creation (OS X only) + Mode os.FileMode // file mode + Nlink uint32 // number of links + Uid uint32 // owner uid + Gid uint32 // group gid + Rdev uint32 // device numbers + Flags uint32 // chflags(2) flags (OS X only) +} + +func unix(t time.Time) (sec uint64, nsec uint32) { + nano := t.UnixNano() + sec = uint64(nano / 1e9) + nsec = uint32(nano % 1e9) + return +} + +func (a *Attr) attr() (out attr) { + out.Ino = a.Inode + out.Size = a.Size + out.Blocks = a.Blocks + out.Atime, out.AtimeNsec = unix(a.Atime) + out.Mtime, out.MtimeNsec = unix(a.Mtime) + out.Ctime, out.CtimeNsec = unix(a.Ctime) + out.SetCrtime(unix(a.Crtime)) + out.Mode = uint32(a.Mode) & 0777 + switch { + default: + out.Mode |= syscall.S_IFREG + case a.Mode&os.ModeDir != 0: + out.Mode |= syscall.S_IFDIR + case a.Mode&os.ModeDevice != 0: + if a.Mode&os.ModeCharDevice != 0 { + out.Mode |= syscall.S_IFCHR + } else { + out.Mode |= syscall.S_IFBLK + } + case a.Mode&os.ModeNamedPipe != 0: + out.Mode |= syscall.S_IFIFO + case a.Mode&os.ModeSymlink != 0: + out.Mode |= syscall.S_IFLNK + case a.Mode&os.ModeSocket != 0: + out.Mode |= syscall.S_IFSOCK + } + if a.Mode&os.ModeSetuid != 0 { + out.Mode |= syscall.S_ISUID + } + if a.Mode&os.ModeSetgid != 0 { + out.Mode |= syscall.S_ISGID + } + out.Nlink = a.Nlink + if out.Nlink < 1 { + out.Nlink = 1 + } + out.Uid = a.Uid + out.Gid = a.Gid + out.Rdev = a.Rdev + out.SetFlags(a.Flags) + + return +} + +// A GetattrRequest asks for the metadata for the file denoted by r.Node. 
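+//
+// A typical handler fills in a GetattrResponse and replies, for example
+// (sketch; the attribute values and "greeting" are invented):
+//
+//	r.Respond(&fuse.GetattrResponse{
+//		AttrValid: time.Minute,
+//		Attr: fuse.Attr{
+//			Inode: 2,
+//			Mode:  0444,
+//			Size:  uint64(len(greeting)),
+//		},
+//	})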
+type GetattrRequest struct { + Header `json:"-"` +} + +func (r *GetattrRequest) String() string { + return fmt.Sprintf("Getattr [%s]", &r.Header) +} + +// Respond replies to the request with the given response. +func (r *GetattrRequest) Respond(resp *GetattrResponse) { + out := &attrOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A GetattrResponse is the response to a GetattrRequest. +type GetattrResponse struct { + AttrValid time.Duration // how long Attr can be cached + Attr Attr // file attributes +} + +func (r *GetattrResponse) String() string { + return fmt.Sprintf("Getattr %+v", *r) +} + +// A GetxattrRequest asks for the extended attributes associated with r.Node. +type GetxattrRequest struct { + Header `json:"-"` + + // Maximum size to return. + Size uint32 + + // Name of the attribute requested. + Name string + + // Offset within extended attributes. + // + // Only valid for OS X, and then only with the resource fork + // attribute. + Position uint32 +} + +func (r *GetxattrRequest) String() string { + return fmt.Sprintf("Getxattr [%s] %q %d @%d", &r.Header, r.Name, r.Size, r.Position) +} + +// Respond replies to the request with the given response. +func (r *GetxattrRequest) Respond(resp *GetxattrResponse) { + if r.Size == 0 { + out := &getxattrOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Size: uint32(len(resp.Xattr)), + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) + } else { + out := &outHeader{Unique: uint64(r.ID)} + r.Conn.respondData(out, unsafe.Sizeof(*out), resp.Xattr) + } +} + +func (r *GetxattrRequest) RespondError(err Error) { + err = translateGetxattrError(err) + r.Header.RespondError(err) +} + +// A GetxattrResponse is the response to a GetxattrRequest. +type GetxattrResponse struct { + Xattr []byte +} + +func (r *GetxattrResponse) String() string { + return fmt.Sprintf("Getxattr %x", r.Xattr) +} + +// A ListxattrRequest asks to list the extended attributes associated with r.Node. +type ListxattrRequest struct { + Header `json:"-"` + Size uint32 // maximum size to return + Position uint32 // offset within attribute list +} + +func (r *ListxattrRequest) String() string { + return fmt.Sprintf("Listxattr [%s] %d @%d", &r.Header, r.Size, r.Position) +} + +// Respond replies to the request with the given response. +func (r *ListxattrRequest) Respond(resp *ListxattrResponse) { + if r.Size == 0 { + out := &getxattrOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Size: uint32(len(resp.Xattr)), + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) + } else { + out := &outHeader{Unique: uint64(r.ID)} + r.Conn.respondData(out, unsafe.Sizeof(*out), resp.Xattr) + } +} + +// A ListxattrResponse is the response to a ListxattrRequest. +type ListxattrResponse struct { + Xattr []byte +} + +func (r *ListxattrResponse) String() string { + return fmt.Sprintf("Listxattr %x", r.Xattr) +} + +// Append adds an extended attribute name to the response. +func (r *ListxattrResponse) Append(names ...string) { + for _, name := range names { + r.Xattr = append(r.Xattr, name...) + r.Xattr = append(r.Xattr, '\x00') + } +} + +// A RemovexattrRequest asks to remove an extended attribute associated with r.Node. 
+type RemovexattrRequest struct { + Header `json:"-"` + Name string // name of extended attribute +} + +func (r *RemovexattrRequest) String() string { + return fmt.Sprintf("Removexattr [%s] %q", &r.Header, r.Name) +} + +// Respond replies to the request, indicating that the attribute was removed. +func (r *RemovexattrRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.Conn.respond(out, unsafe.Sizeof(*out)) +} + +func (r *RemovexattrRequest) RespondError(err Error) { + err = translateGetxattrError(err) + r.Header.RespondError(err) +} + +// A SetxattrRequest asks to set an extended attribute associated with a file. +type SetxattrRequest struct { + Header `json:"-"` + + // Flags can make the request fail if attribute does/not already + // exist. Unfortunately, the constants are platform-specific and + // not exposed by Go1.2. Look for XATTR_CREATE, XATTR_REPLACE. + // + // TODO improve this later + // + // TODO XATTR_CREATE and exist -> EEXIST + // + // TODO XATTR_REPLACE and not exist -> ENODATA + Flags uint32 + + // Offset within extended attributes. + // + // Only valid for OS X, and then only with the resource fork + // attribute. + Position uint32 + + Name string + Xattr []byte +} + +func (r *SetxattrRequest) String() string { + return fmt.Sprintf("Setxattr [%s] %q %x fl=%v @%#x", &r.Header, r.Name, r.Xattr, r.Flags, r.Position) +} + +// Respond replies to the request, indicating that the extended attribute was set. +func (r *SetxattrRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.Conn.respond(out, unsafe.Sizeof(*out)) +} + +func (r *SetxattrRequest) RespondError(err Error) { + err = translateGetxattrError(err) + r.Header.RespondError(err) +} + +// A LookupRequest asks to look up the given name in the directory named by r.Node. +type LookupRequest struct { + Header `json:"-"` + Name string +} + +func (r *LookupRequest) String() string { + return fmt.Sprintf("Lookup [%s] %q", &r.Header, r.Name) +} + +// Respond replies to the request with the given response. +func (r *LookupRequest) Respond(resp *LookupResponse) { + out := &entryOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Nodeid: uint64(resp.Node), + Generation: resp.Generation, + EntryValid: uint64(resp.EntryValid / time.Second), + EntryValidNsec: uint32(resp.EntryValid % time.Second / time.Nanosecond), + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A LookupResponse is the response to a LookupRequest. +type LookupResponse struct { + Node NodeID + Generation uint64 + EntryValid time.Duration + AttrValid time.Duration + Attr Attr +} + +func (r *LookupResponse) String() string { + return fmt.Sprintf("Lookup %+v", *r) +} + +// An OpenRequest asks to open a file or directory +type OpenRequest struct { + Header `json:"-"` + Dir bool // is this Opendir? + Flags OpenFlags +} + +func (r *OpenRequest) String() string { + return fmt.Sprintf("Open [%s] dir=%v fl=%v", &r.Header, r.Dir, r.Flags) +} + +// Respond replies to the request with the given response. +func (r *OpenRequest) Respond(resp *OpenResponse) { + out := &openOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Fh: uint64(resp.Handle), + OpenFlags: uint32(resp.Flags), + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A OpenResponse is the response to a OpenRequest. 
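+//
+// Flags can be used to influence kernel caching for the opened handle, for
+// example (sketch):
+//
+//	resp.Flags |= fuse.OpenDirectIO // bypass the page cache for this handle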
+type OpenResponse struct { + Handle HandleID + Flags OpenResponseFlags +} + +func (r *OpenResponse) String() string { + return fmt.Sprintf("Open %+v", *r) +} + +// A CreateRequest asks to create and open a file (not a directory). +type CreateRequest struct { + Header `json:"-"` + Name string + Flags OpenFlags + Mode os.FileMode +} + +func (r *CreateRequest) String() string { + return fmt.Sprintf("Create [%s] %q fl=%v mode=%v", &r.Header, r.Name, r.Flags, r.Mode) +} + +// Respond replies to the request with the given response. +func (r *CreateRequest) Respond(resp *CreateResponse) { + out := &createOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + + Nodeid: uint64(resp.Node), + Generation: resp.Generation, + EntryValid: uint64(resp.EntryValid / time.Second), + EntryValidNsec: uint32(resp.EntryValid % time.Second / time.Nanosecond), + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + + Fh: uint64(resp.Handle), + OpenFlags: uint32(resp.Flags), + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A CreateResponse is the response to a CreateRequest. +// It describes the created node and opened handle. +type CreateResponse struct { + LookupResponse + OpenResponse +} + +func (r *CreateResponse) String() string { + return fmt.Sprintf("Create %+v", *r) +} + +// A MkdirRequest asks to create (but not open) a directory. +type MkdirRequest struct { + Header `json:"-"` + Name string + Mode os.FileMode +} + +func (r *MkdirRequest) String() string { + return fmt.Sprintf("Mkdir [%s] %q mode=%v", &r.Header, r.Name, r.Mode) +} + +// Respond replies to the request with the given response. +func (r *MkdirRequest) Respond(resp *MkdirResponse) { + out := &entryOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Nodeid: uint64(resp.Node), + Generation: resp.Generation, + EntryValid: uint64(resp.EntryValid / time.Second), + EntryValidNsec: uint32(resp.EntryValid % time.Second / time.Nanosecond), + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A MkdirResponse is the response to a MkdirRequest. +type MkdirResponse struct { + LookupResponse +} + +func (r *MkdirResponse) String() string { + return fmt.Sprintf("Mkdir %+v", *r) +} + +// A ReadRequest asks to read from an open file. +type ReadRequest struct { + Header `json:"-"` + Dir bool // is this Readdir? + Handle HandleID + Offset int64 + Size int +} + +func (r *ReadRequest) String() string { + return fmt.Sprintf("Read [%s] %#x %d @%#x dir=%v", &r.Header, r.Handle, r.Size, r.Offset, r.Dir) +} + +// Respond replies to the request with the given response. +func (r *ReadRequest) Respond(resp *ReadResponse) { + out := &outHeader{Unique: uint64(r.ID)} + r.Conn.respondData(out, unsafe.Sizeof(*out), resp.Data) +} + +// A ReadResponse is the response to a ReadRequest. +type ReadResponse struct { + Data []byte +} + +func (r *ReadResponse) String() string { + return fmt.Sprintf("Read %d", len(r.Data)) +} + +type jsonReadResponse struct { + Len uint64 +} + +func (r *ReadResponse) MarshalJSON() ([]byte, error) { + j := jsonReadResponse{ + Len: uint64(len(r.Data)), + } + return json.Marshal(j) +} + +// A ReleaseRequest asks to release (close) an open file handle. +type ReleaseRequest struct { + Header `json:"-"` + Dir bool // is this Releasedir? 
+ Handle HandleID + Flags OpenFlags // flags from OpenRequest + ReleaseFlags ReleaseFlags + LockOwner uint32 +} + +func (r *ReleaseRequest) String() string { + return fmt.Sprintf("Release [%s] %#x fl=%v rfl=%v owner=%#x", &r.Header, r.Handle, r.Flags, r.ReleaseFlags, r.LockOwner) +} + +// Respond replies to the request, indicating that the handle has been released. +func (r *ReleaseRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.Conn.respond(out, unsafe.Sizeof(*out)) +} + +// A DestroyRequest is sent by the kernel when unmounting the file system. +// No more requests will be received after this one, but it should still be +// responded to. +type DestroyRequest struct { + Header `json:"-"` +} + +func (r *DestroyRequest) String() string { + return fmt.Sprintf("Destroy [%s]", &r.Header) +} + +// Respond replies to the request. +func (r *DestroyRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.Conn.respond(out, unsafe.Sizeof(*out)) +} + +// A ForgetRequest is sent by the kernel when forgetting about r.Node +// as returned by r.N lookup requests. +type ForgetRequest struct { + Header `json:"-"` + N uint64 +} + +func (r *ForgetRequest) String() string { + return fmt.Sprintf("Forget [%s] %d", &r.Header, r.N) +} + +// Respond replies to the request, indicating that the forgetfulness has been recorded. +func (r *ForgetRequest) Respond() { + // Don't reply to forget messages. +} + +// A Dirent represents a single directory entry. +type Dirent struct { + // Inode this entry names. + Inode uint64 + + // Type of the entry, for example DT_File. + // + // Setting this is optional. The zero value (DT_Unknown) means + // callers will just need to do a Getattr when the type is + // needed. Providing a type can speed up operations + // significantly. + Type DirentType + + // Name of the entry + Name string +} + +// Type of an entry in a directory listing. +type DirentType uint32 + +const ( + // These don't quite match os.FileMode; especially there's an + // explicit unknown, instead of zero value meaning file. They + // are also not quite syscall.DT_*; nothing says the FUSE + // protocol follows those, and even if they were, we don't + // want each fs to fiddle with syscall. + + // The shift by 12 is hardcoded in the FUSE userspace + // low-level C library, so it's safe here. + + DT_Unknown DirentType = 0 + DT_Socket DirentType = syscall.S_IFSOCK >> 12 + DT_Link DirentType = syscall.S_IFLNK >> 12 + DT_File DirentType = syscall.S_IFREG >> 12 + DT_Block DirentType = syscall.S_IFBLK >> 12 + DT_Dir DirentType = syscall.S_IFDIR >> 12 + DT_Char DirentType = syscall.S_IFCHR >> 12 + DT_FIFO DirentType = syscall.S_IFIFO >> 12 +) + +func (t DirentType) String() string { + switch t { + case DT_Unknown: + return "unknown" + case DT_Socket: + return "socket" + case DT_Link: + return "link" + case DT_File: + return "file" + case DT_Block: + return "block" + case DT_Dir: + return "dir" + case DT_Char: + return "char" + case DT_FIFO: + return "fifo" + } + return "invalid" +} + +// AppendDirent appends the encoded form of a directory entry to data +// and returns the resulting slice. +func AppendDirent(data []byte, dir Dirent) []byte { + de := dirent{ + Ino: dir.Inode, + Namelen: uint32(len(dir.Name)), + Type: uint32(dir.Type), + } + de.Off = uint64(len(data) + direntSize + (len(dir.Name)+7)&^7) + data = append(data, (*[direntSize]byte)(unsafe.Pointer(&de))[:]...) + data = append(data, dir.Name...) 
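+	// Each dirent record must be padded to an 8-byte boundary (matching the
+	// Off value computed above), so follow the name with zero bytes up to
+	// the next multiple of eight.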
+ n := direntSize + uintptr(len(dir.Name)) + if n%8 != 0 { + var pad [8]byte + data = append(data, pad[:8-n%8]...) + } + return data +} + +// A WriteRequest asks to write to an open file. +type WriteRequest struct { + Header + Handle HandleID + Offset int64 + Data []byte + Flags WriteFlags +} + +func (r *WriteRequest) String() string { + return fmt.Sprintf("Write [%s] %#x %d @%d fl=%v", &r.Header, r.Handle, len(r.Data), r.Offset, r.Flags) +} + +type jsonWriteRequest struct { + Handle HandleID + Offset int64 + Len uint64 + Flags WriteFlags +} + +func (r *WriteRequest) MarshalJSON() ([]byte, error) { + j := jsonWriteRequest{ + Handle: r.Handle, + Offset: r.Offset, + Len: uint64(len(r.Data)), + Flags: r.Flags, + } + return json.Marshal(j) +} + +// Respond replies to the request with the given response. +func (r *WriteRequest) Respond(resp *WriteResponse) { + out := &writeOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Size: uint32(resp.Size), + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A WriteResponse replies to a write indicating how many bytes were written. +type WriteResponse struct { + Size int +} + +func (r *WriteResponse) String() string { + return fmt.Sprintf("Write %+v", *r) +} + +// A SetattrRequest asks to change one or more attributes associated with a file, +// as indicated by Valid. +type SetattrRequest struct { + Header `json:"-"` + Valid SetattrValid + Handle HandleID + Size uint64 + Atime time.Time + Mtime time.Time + Mode os.FileMode + Uid uint32 + Gid uint32 + + // OS X only + Bkuptime time.Time + Chgtime time.Time + Crtime time.Time + Flags uint32 // see chflags(2) +} + +func (r *SetattrRequest) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "Setattr [%s]", &r.Header) + if r.Valid.Mode() { + fmt.Fprintf(&buf, " mode=%v", r.Mode) + } + if r.Valid.Uid() { + fmt.Fprintf(&buf, " uid=%d", r.Uid) + } + if r.Valid.Gid() { + fmt.Fprintf(&buf, " gid=%d", r.Gid) + } + if r.Valid.Size() { + fmt.Fprintf(&buf, " size=%d", r.Size) + } + if r.Valid.Atime() { + fmt.Fprintf(&buf, " atime=%v", r.Atime) + } + if r.Valid.AtimeNow() { + fmt.Fprintf(&buf, " atime=now") + } + if r.Valid.Mtime() { + fmt.Fprintf(&buf, " mtime=%v", r.Mtime) + } + if r.Valid.MtimeNow() { + fmt.Fprintf(&buf, " mtime=now") + } + if r.Valid.Handle() { + fmt.Fprintf(&buf, " handle=%#x", r.Handle) + } else { + fmt.Fprintf(&buf, " handle=INVALID-%#x", r.Handle) + } + if r.Valid.LockOwner() { + fmt.Fprintf(&buf, " lockowner") + } + if r.Valid.Crtime() { + fmt.Fprintf(&buf, " crtime=%v", r.Crtime) + } + if r.Valid.Chgtime() { + fmt.Fprintf(&buf, " chgtime=%v", r.Chgtime) + } + if r.Valid.Bkuptime() { + fmt.Fprintf(&buf, " bkuptime=%v", r.Bkuptime) + } + if r.Valid.Flags() { + fmt.Fprintf(&buf, " flags=%#x", r.Flags) + } + return buf.String() +} + +// Respond replies to the request with the given response, +// giving the updated attributes. +func (r *SetattrRequest) Respond(resp *SetattrResponse) { + out := &attrOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A SetattrResponse is the response to a SetattrRequest. 
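+//
+// Handlers normally consult SetattrRequest.Valid to see which fields the
+// kernel actually wants changed before building this response, for example
+// (sketch):
+//
+//	if req.Valid.Size() {
+//		// truncate or extend the backing data to req.Size
+//	}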
+type SetattrResponse struct { + AttrValid time.Duration // how long Attr can be cached + Attr Attr // file attributes +} + +func (r *SetattrResponse) String() string { + return fmt.Sprintf("Setattr %+v", *r) +} + +// A FlushRequest asks for the current state of an open file to be flushed +// to storage, as when a file descriptor is being closed. A single opened Handle +// may receive multiple FlushRequests over its lifetime. +type FlushRequest struct { + Header `json:"-"` + Handle HandleID + Flags uint32 + LockOwner uint64 +} + +func (r *FlushRequest) String() string { + return fmt.Sprintf("Flush [%s] %#x fl=%#x lk=%#x", &r.Header, r.Handle, r.Flags, r.LockOwner) +} + +// Respond replies to the request, indicating that the flush succeeded. +func (r *FlushRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.Conn.respond(out, unsafe.Sizeof(*out)) +} + +// A RemoveRequest asks to remove a file or directory. +type RemoveRequest struct { + Header `json:"-"` + Name string // name of extended attribute + Dir bool // is this rmdir? +} + +func (r *RemoveRequest) String() string { + return fmt.Sprintf("Remove [%s] %q dir=%v", &r.Header, r.Name, r.Dir) +} + +// Respond replies to the request, indicating that the file was removed. +func (r *RemoveRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.Conn.respond(out, unsafe.Sizeof(*out)) +} + +// A SymlinkRequest is a request to create a symlink making NewName point to Target. +type SymlinkRequest struct { + Header `json:"-"` + NewName, Target string +} + +func (r *SymlinkRequest) String() string { + return fmt.Sprintf("Symlink [%s] from %q to target %q", &r.Header, r.NewName, r.Target) +} + +// Respond replies to the request, indicating that the symlink was created. +func (r *SymlinkRequest) Respond(resp *SymlinkResponse) { + out := &entryOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Nodeid: uint64(resp.Node), + Generation: resp.Generation, + EntryValid: uint64(resp.EntryValid / time.Second), + EntryValidNsec: uint32(resp.EntryValid % time.Second / time.Nanosecond), + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A SymlinkResponse is the response to a SymlinkRequest. +type SymlinkResponse struct { + LookupResponse +} + +// A ReadlinkRequest is a request to read a symlink's target. +type ReadlinkRequest struct { + Header `json:"-"` +} + +func (r *ReadlinkRequest) String() string { + return fmt.Sprintf("Readlink [%s]", &r.Header) +} + +func (r *ReadlinkRequest) Respond(target string) { + out := &outHeader{Unique: uint64(r.ID)} + r.Conn.respondData(out, unsafe.Sizeof(*out), []byte(target)) +} + +// A LinkRequest is a request to create a hard link. 
+type LinkRequest struct { + Header `json:"-"` + OldNode NodeID + NewName string +} + +func (r *LinkRequest) String() string { + return fmt.Sprintf("Link [%s] node %d to %q", &r.Header, r.OldNode, r.NewName) +} + +func (r *LinkRequest) Respond(resp *LookupResponse) { + out := &entryOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Nodeid: uint64(resp.Node), + Generation: resp.Generation, + EntryValid: uint64(resp.EntryValid / time.Second), + EntryValidNsec: uint32(resp.EntryValid % time.Second / time.Nanosecond), + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A RenameRequest is a request to rename a file. +type RenameRequest struct { + Header `json:"-"` + NewDir NodeID + OldName, NewName string +} + +func (r *RenameRequest) String() string { + return fmt.Sprintf("Rename [%s] from %q to dirnode %d %q", &r.Header, r.OldName, r.NewDir, r.NewName) +} + +func (r *RenameRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.Conn.respond(out, unsafe.Sizeof(*out)) +} + +type MknodRequest struct { + Header `json:"-"` + Name string + Mode os.FileMode + Rdev uint32 +} + +func (r *MknodRequest) String() string { + return fmt.Sprintf("Mknod [%s] Name %q mode %v rdev %d", &r.Header, r.Name, r.Mode, r.Rdev) +} + +func (r *MknodRequest) Respond(resp *LookupResponse) { + out := &entryOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + Nodeid: uint64(resp.Node), + Generation: resp.Generation, + EntryValid: uint64(resp.EntryValid / time.Second), + EntryValidNsec: uint32(resp.EntryValid % time.Second / time.Nanosecond), + AttrValid: uint64(resp.AttrValid / time.Second), + AttrValidNsec: uint32(resp.AttrValid % time.Second / time.Nanosecond), + Attr: resp.Attr.attr(), + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +type FsyncRequest struct { + Header `json:"-"` + Handle HandleID + // TODO bit 1 is datasync, not well documented upstream + Flags uint32 + Dir bool +} + +func (r *FsyncRequest) String() string { + return fmt.Sprintf("Fsync [%s] Handle %v Flags %v", &r.Header, r.Handle, r.Flags) +} + +func (r *FsyncRequest) Respond() { + out := &outHeader{Unique: uint64(r.ID)} + r.Conn.respond(out, unsafe.Sizeof(*out)) +} + +// An InterruptRequest is a request to interrupt another pending request. The +// response to that request should return an error status of EINTR. +type InterruptRequest struct { + Header `json:"-"` + IntrID RequestID // ID of the request to be interrupt. +} + +func (r *InterruptRequest) Respond() { + // nothing to do here +} + +func (r *InterruptRequest) String() string { + return fmt.Sprintf("Interrupt [%s] ID %v", &r.Header, r.IntrID) +} + +/*{ + +// A XXXRequest xxx. +type XXXRequest struct { + Header `json:"-"` + xxx +} + +func (r *XXXRequest) String() string { + return fmt.Sprintf("XXX [%s] xxx", &r.Header) +} + +// Respond replies to the request with the given response. +func (r *XXXRequest) Respond(resp *XXXResponse) { + out := &xxxOut{ + outHeader: outHeader{Unique: uint64(r.ID)}, + xxx, + } + r.Conn.respond(&out.outHeader, unsafe.Sizeof(*out)) +} + +// A XXXResponse is the response to a XXXRequest. 
+type XXXResponse struct { + xxx +} + +func (r *XXXResponse) String() string { + return fmt.Sprintf("XXX %+v", *r) +} + + } +*/ diff --git a/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel.go b/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel.go new file mode 100644 index 000000000..a8ec12ca4 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel.go @@ -0,0 +1,639 @@ +// See the file LICENSE for copyright and licensing information. + +// Derived from FUSE's fuse_kernel.h +/* + This file defines the kernel interface of FUSE + Copyright (C) 2001-2007 Miklos Szeredi + + + This -- and only this -- header file may also be distributed under + the terms of the BSD Licence as follows: + + Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. +*/ + +package fuse + +import ( + "fmt" + "syscall" + "unsafe" +) + +// Version is the FUSE version implemented by the package. +const Version = "7.8" + +const ( + kernelVersion = 7 + kernelMinorVersion = 8 + rootID = 1 +) + +type kstatfs struct { + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Bsize uint32 + Namelen uint32 + Frsize uint32 + Padding uint32 + Spare [6]uint32 +} + +type fileLock struct { + Start uint64 + End uint64 + Type uint32 + Pid uint32 +} + +// The SetattrValid are bit flags describing which fields in the SetattrRequest +// are included in the change. +type SetattrValid uint32 + +const ( + SetattrMode SetattrValid = 1 << 0 + SetattrUid SetattrValid = 1 << 1 + SetattrGid SetattrValid = 1 << 2 + SetattrSize SetattrValid = 1 << 3 + SetattrAtime SetattrValid = 1 << 4 + SetattrMtime SetattrValid = 1 << 5 + SetattrHandle SetattrValid = 1 << 6 + + // Linux only(?) 
+ SetattrAtimeNow SetattrValid = 1 << 7 + SetattrMtimeNow SetattrValid = 1 << 8 + SetattrLockOwner SetattrValid = 1 << 9 // http://www.mail-archive.com/git-commits-head@vger.kernel.org/msg27852.html + + // OS X only + SetattrCrtime SetattrValid = 1 << 28 + SetattrChgtime SetattrValid = 1 << 29 + SetattrBkuptime SetattrValid = 1 << 30 + SetattrFlags SetattrValid = 1 << 31 +) + +func (fl SetattrValid) Mode() bool { return fl&SetattrMode != 0 } +func (fl SetattrValid) Uid() bool { return fl&SetattrUid != 0 } +func (fl SetattrValid) Gid() bool { return fl&SetattrGid != 0 } +func (fl SetattrValid) Size() bool { return fl&SetattrSize != 0 } +func (fl SetattrValid) Atime() bool { return fl&SetattrAtime != 0 } +func (fl SetattrValid) Mtime() bool { return fl&SetattrMtime != 0 } +func (fl SetattrValid) Handle() bool { return fl&SetattrHandle != 0 } +func (fl SetattrValid) AtimeNow() bool { return fl&SetattrAtimeNow != 0 } +func (fl SetattrValid) MtimeNow() bool { return fl&SetattrMtimeNow != 0 } +func (fl SetattrValid) LockOwner() bool { return fl&SetattrLockOwner != 0 } +func (fl SetattrValid) Crtime() bool { return fl&SetattrCrtime != 0 } +func (fl SetattrValid) Chgtime() bool { return fl&SetattrChgtime != 0 } +func (fl SetattrValid) Bkuptime() bool { return fl&SetattrBkuptime != 0 } +func (fl SetattrValid) Flags() bool { return fl&SetattrFlags != 0 } + +func (fl SetattrValid) String() string { + return flagString(uint32(fl), setattrValidNames) +} + +var setattrValidNames = []flagName{ + {uint32(SetattrMode), "SetattrMode"}, + {uint32(SetattrUid), "SetattrUid"}, + {uint32(SetattrGid), "SetattrGid"}, + {uint32(SetattrSize), "SetattrSize"}, + {uint32(SetattrAtime), "SetattrAtime"}, + {uint32(SetattrMtime), "SetattrMtime"}, + {uint32(SetattrHandle), "SetattrHandle"}, + {uint32(SetattrAtimeNow), "SetattrAtimeNow"}, + {uint32(SetattrMtimeNow), "SetattrMtimeNow"}, + {uint32(SetattrLockOwner), "SetattrLockOwner"}, + {uint32(SetattrCrtime), "SetattrCrtime"}, + {uint32(SetattrChgtime), "SetattrChgtime"}, + {uint32(SetattrBkuptime), "SetattrBkuptime"}, + {uint32(SetattrFlags), "SetattrFlags"}, +} + +// Flags that can be seen in OpenRequest.Flags. +const ( + // Access modes. These are not 1-bit flags, but alternatives where + // only one can be chosen. See the IsReadOnly etc convenience + // methods. + OpenReadOnly OpenFlags = syscall.O_RDONLY + OpenWriteOnly OpenFlags = syscall.O_WRONLY + OpenReadWrite OpenFlags = syscall.O_RDWR + + OpenAppend OpenFlags = syscall.O_APPEND + OpenCreate OpenFlags = syscall.O_CREAT + OpenExclusive OpenFlags = syscall.O_EXCL + OpenSync OpenFlags = syscall.O_SYNC + OpenTruncate OpenFlags = syscall.O_TRUNC +) + +// OpenAccessModeMask is a bitmask that separates the access mode +// from the other flags in OpenFlags. +const OpenAccessModeMask OpenFlags = syscall.O_ACCMODE + +// OpenFlags are the O_FOO flags passed to open/create/etc calls. For +// example, os.O_WRONLY | os.O_APPEND. +type OpenFlags uint32 + +func (fl OpenFlags) String() string { + // O_RDONLY, O_RWONLY, O_RDWR are not flags + s := accModeName(fl & OpenAccessModeMask) + flags := uint32(fl &^ OpenAccessModeMask) + if flags != 0 { + s = s + "+" + flagString(flags, openFlagNames) + } + return s +} + +// Return true if OpenReadOnly is set. +func (fl OpenFlags) IsReadOnly() bool { + return fl&OpenAccessModeMask == OpenReadOnly +} + +// Return true if OpenWriteOnly is set. +func (fl OpenFlags) IsWriteOnly() bool { + return fl&OpenAccessModeMask == OpenWriteOnly +} + +// Return true if OpenReadWrite is set. 
+func (fl OpenFlags) IsReadWrite() bool { + return fl&OpenAccessModeMask == OpenReadWrite +} + +func accModeName(flags OpenFlags) string { + switch flags { + case OpenReadOnly: + return "OpenReadOnly" + case OpenWriteOnly: + return "OpenWriteOnly" + case OpenReadWrite: + return "OpenReadWrite" + default: + return "" + } +} + +var openFlagNames = []flagName{ + {uint32(OpenCreate), "OpenCreate"}, + {uint32(OpenExclusive), "OpenExclusive"}, + {uint32(OpenTruncate), "OpenTruncate"}, + {uint32(OpenAppend), "OpenAppend"}, + {uint32(OpenSync), "OpenSync"}, +} + +// The OpenResponseFlags are returned in the OpenResponse. +type OpenResponseFlags uint32 + +const ( + OpenDirectIO OpenResponseFlags = 1 << 0 // bypass page cache for this open file + OpenKeepCache OpenResponseFlags = 1 << 1 // don't invalidate the data cache on open + OpenNonSeekable OpenResponseFlags = 1 << 2 // (Linux?) + + OpenPurgeAttr OpenResponseFlags = 1 << 30 // OS X + OpenPurgeUBC OpenResponseFlags = 1 << 31 // OS X +) + +func (fl OpenResponseFlags) String() string { + return flagString(uint32(fl), openResponseFlagNames) +} + +var openResponseFlagNames = []flagName{ + {uint32(OpenDirectIO), "OpenDirectIO"}, + {uint32(OpenKeepCache), "OpenKeepCache"}, + {uint32(OpenPurgeAttr), "OpenPurgeAttr"}, + {uint32(OpenPurgeUBC), "OpenPurgeUBC"}, +} + +// The InitFlags are used in the Init exchange. +type InitFlags uint32 + +const ( + InitAsyncRead InitFlags = 1 << 0 + InitPosixLocks InitFlags = 1 << 1 + InitFileOps InitFlags = 1 << 2 + InitAtomicTrunc InitFlags = 1 << 3 + InitExportSupport InitFlags = 1 << 4 + InitBigWrites InitFlags = 1 << 5 + InitDontMask InitFlags = 1 << 6 + InitSpliceWrite InitFlags = 1 << 7 + InitSpliceMove InitFlags = 1 << 8 + InitSpliceRead InitFlags = 1 << 9 + InitFlockLocks InitFlags = 1 << 10 + InitHasIoctlDir InitFlags = 1 << 11 + InitAutoInvalData InitFlags = 1 << 12 + InitDoReaddirplus InitFlags = 1 << 13 + InitReaddirplusAuto InitFlags = 1 << 14 + InitAsyncDIO InitFlags = 1 << 15 + InitWritebackCache InitFlags = 1 << 16 + InitNoOpenSupport InitFlags = 1 << 17 + + InitCaseSensitive InitFlags = 1 << 29 // OS X only + InitVolRename InitFlags = 1 << 30 // OS X only + InitXtimes InitFlags = 1 << 31 // OS X only +) + +type flagName struct { + bit uint32 + name string +} + +var initFlagNames = []flagName{ + {uint32(InitAsyncRead), "InitAsyncRead"}, + {uint32(InitPosixLocks), "InitPosixLocks"}, + {uint32(InitFileOps), "InitFileOps"}, + {uint32(InitAtomicTrunc), "InitAtomicTrunc"}, + {uint32(InitExportSupport), "InitExportSupport"}, + {uint32(InitBigWrites), "InitBigWrites"}, + {uint32(InitDontMask), "InitDontMask"}, + {uint32(InitSpliceWrite), "InitSpliceWrite"}, + {uint32(InitSpliceMove), "InitSpliceMove"}, + {uint32(InitSpliceRead), "InitSpliceRead"}, + {uint32(InitFlockLocks), "InitFlockLocks"}, + {uint32(InitHasIoctlDir), "InitHasIoctlDir"}, + {uint32(InitAutoInvalData), "InitAutoInvalData"}, + {uint32(InitDoReaddirplus), "InitDoReaddirplus"}, + {uint32(InitReaddirplusAuto), "InitReaddirplusAuto"}, + {uint32(InitAsyncDIO), "InitAsyncDIO"}, + {uint32(InitWritebackCache), "InitWritebackCache"}, + {uint32(InitNoOpenSupport), "InitNoOpenSupport"}, + + {uint32(InitCaseSensitive), "InitCaseSensitive"}, + {uint32(InitVolRename), "InitVolRename"}, + {uint32(InitXtimes), "InitXtimes"}, +} + +func (fl InitFlags) String() string { + return flagString(uint32(fl), initFlagNames) +} + +func flagString(f uint32, names []flagName) string { + var s string + + if f == 0 { + return "0" + } + + for _, n := range names { + if 
f&n.bit != 0 { + s += "+" + n.name + f &^= n.bit + } + } + if f != 0 { + s += fmt.Sprintf("%+#x", f) + } + return s[1:] +} + +// The ReleaseFlags are used in the Release exchange. +type ReleaseFlags uint32 + +const ( + ReleaseFlush ReleaseFlags = 1 << 0 +) + +func (fl ReleaseFlags) String() string { + return flagString(uint32(fl), releaseFlagNames) +} + +var releaseFlagNames = []flagName{ + {uint32(ReleaseFlush), "ReleaseFlush"}, +} + +// Opcodes +const ( + opLookup = 1 + opForget = 2 // no reply + opGetattr = 3 + opSetattr = 4 + opReadlink = 5 + opSymlink = 6 + opMknod = 8 + opMkdir = 9 + opUnlink = 10 + opRmdir = 11 + opRename = 12 + opLink = 13 + opOpen = 14 + opRead = 15 + opWrite = 16 + opStatfs = 17 + opRelease = 18 + opFsync = 20 + opSetxattr = 21 + opGetxattr = 22 + opListxattr = 23 + opRemovexattr = 24 + opFlush = 25 + opInit = 26 + opOpendir = 27 + opReaddir = 28 + opReleasedir = 29 + opFsyncdir = 30 + opGetlk = 31 + opSetlk = 32 + opSetlkw = 33 + opAccess = 34 + opCreate = 35 + opInterrupt = 36 + opBmap = 37 + opDestroy = 38 + opIoctl = 39 // Linux? + opPoll = 40 // Linux? + + // OS X + opSetvolname = 61 + opGetxtimes = 62 + opExchange = 63 +) + +type entryOut struct { + outHeader + Nodeid uint64 // Inode ID + Generation uint64 // Inode generation + EntryValid uint64 // Cache timeout for the name + AttrValid uint64 // Cache timeout for the attributes + EntryValidNsec uint32 + AttrValidNsec uint32 + Attr attr +} + +type forgetIn struct { + Nlookup uint64 +} + +type attrOut struct { + outHeader + AttrValid uint64 // Cache timeout for the attributes + AttrValidNsec uint32 + Dummy uint32 + Attr attr +} + +// OS X +type getxtimesOut struct { + outHeader + Bkuptime uint64 + Crtime uint64 + BkuptimeNsec uint32 + CrtimeNsec uint32 +} + +type mknodIn struct { + Mode uint32 + Rdev uint32 + // "filename\x00" follows. +} + +type mkdirIn struct { + Mode uint32 + Padding uint32 + // filename follows +} + +type renameIn struct { + Newdir uint64 + // "oldname\x00newname\x00" follows +} + +// OS X +type exchangeIn struct { + Olddir uint64 + Newdir uint64 + Options uint64 +} + +type linkIn struct { + Oldnodeid uint64 +} + +type setattrInCommon struct { + Valid uint32 + Padding uint32 + Fh uint64 + Size uint64 + LockOwner uint64 // unused on OS X? + Atime uint64 + Mtime uint64 + Unused2 uint64 + AtimeNsec uint32 + MtimeNsec uint32 + Unused3 uint32 + Mode uint32 + Unused4 uint32 + Uid uint32 + Gid uint32 + Unused5 uint32 +} + +type openIn struct { + Flags uint32 + Unused uint32 +} + +type openOut struct { + outHeader + Fh uint64 + OpenFlags uint32 + Padding uint32 +} + +type createIn struct { + Flags uint32 + Mode uint32 +} + +type createOut struct { + outHeader + + Nodeid uint64 // Inode ID + Generation uint64 // Inode generation + EntryValid uint64 // Cache timeout for the name + AttrValid uint64 // Cache timeout for the attributes + EntryValidNsec uint32 + AttrValidNsec uint32 + Attr attr + + Fh uint64 + OpenFlags uint32 + Padding uint32 +} + +type releaseIn struct { + Fh uint64 + Flags uint32 + ReleaseFlags uint32 + LockOwner uint32 +} + +type flushIn struct { + Fh uint64 + FlushFlags uint32 + Padding uint32 + LockOwner uint64 +} + +type readIn struct { + Fh uint64 + Offset uint64 + Size uint32 + Padding uint32 +} + +type writeIn struct { + Fh uint64 + Offset uint64 + Size uint32 + WriteFlags uint32 +} + +type writeOut struct { + outHeader + Size uint32 + Padding uint32 +} + +// The WriteFlags are returned in the WriteResponse. 
+type WriteFlags uint32 + +func (fl WriteFlags) String() string { + return flagString(uint32(fl), writeFlagNames) +} + +var writeFlagNames = []flagName{} + +const compatStatfsSize = 48 + +type statfsOut struct { + outHeader + St kstatfs +} + +type fsyncIn struct { + Fh uint64 + FsyncFlags uint32 + Padding uint32 +} + +type setxattrInCommon struct { + Size uint32 + Flags uint32 +} + +func (setxattrInCommon) position() uint32 { + return 0 +} + +type getxattrInCommon struct { + Size uint32 + Padding uint32 +} + +func (getxattrInCommon) position() uint32 { + return 0 +} + +type getxattrOut struct { + outHeader + Size uint32 + Padding uint32 +} + +type lkIn struct { + Fh uint64 + Owner uint64 + Lk fileLock +} + +type lkOut struct { + outHeader + Lk fileLock +} + +type accessIn struct { + Mask uint32 + Padding uint32 +} + +type initIn struct { + Major uint32 + Minor uint32 + MaxReadahead uint32 + Flags uint32 +} + +const initInSize = int(unsafe.Sizeof(initIn{})) + +type initOut struct { + outHeader + Major uint32 + Minor uint32 + MaxReadahead uint32 + Flags uint32 + Unused uint32 + MaxWrite uint32 +} + +type interruptIn struct { + Unique uint64 +} + +type bmapIn struct { + Block uint64 + BlockSize uint32 + Padding uint32 +} + +type bmapOut struct { + outHeader + Block uint64 +} + +type inHeader struct { + Len uint32 + Opcode uint32 + Unique uint64 + Nodeid uint64 + Uid uint32 + Gid uint32 + Pid uint32 + Padding uint32 +} + +const inHeaderSize = int(unsafe.Sizeof(inHeader{})) + +type outHeader struct { + Len uint32 + Error int32 + Unique uint64 +} + +type dirent struct { + Ino uint64 + Off uint64 + Namelen uint32 + Type uint32 + Name [0]byte +} + +const direntSize = 8 + 8 + 4 + 4 diff --git a/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_darwin.go b/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_darwin.go new file mode 100644 index 000000000..4f9347d03 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_darwin.go @@ -0,0 +1,86 @@ +package fuse + +import ( + "time" +) + +type attr struct { + Ino uint64 + Size uint64 + Blocks uint64 + Atime uint64 + Mtime uint64 + Ctime uint64 + Crtime_ uint64 // OS X only + AtimeNsec uint32 + MtimeNsec uint32 + CtimeNsec uint32 + CrtimeNsec uint32 // OS X only + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + Flags_ uint32 // OS X only; see chflags(2) +} + +func (a *attr) SetCrtime(s uint64, ns uint32) { + a.Crtime_, a.CrtimeNsec = s, ns +} + +func (a *attr) SetFlags(f uint32) { + a.Flags_ = f +} + +type setattrIn struct { + setattrInCommon + + // OS X only + Bkuptime_ uint64 + Chgtime_ uint64 + Crtime uint64 + BkuptimeNsec uint32 + ChgtimeNsec uint32 + CrtimeNsec uint32 + Flags_ uint32 // see chflags(2) +} + +func (in *setattrIn) BkupTime() time.Time { + return time.Unix(int64(in.Bkuptime_), int64(in.BkuptimeNsec)) +} + +func (in *setattrIn) Chgtime() time.Time { + return time.Unix(int64(in.Chgtime_), int64(in.ChgtimeNsec)) +} + +func (in *setattrIn) Flags() uint32 { + return in.Flags_ +} + +func openFlags(flags uint32) OpenFlags { + return OpenFlags(flags) +} + +type getxattrIn struct { + getxattrInCommon + + // OS X only + Position uint32 + Padding uint32 +} + +func (g *getxattrIn) position() uint32 { + return g.Position +} + +type setxattrIn struct { + setxattrInCommon + + // OS X only + Position uint32 + Padding uint32 +} + +func (s *setxattrIn) position() uint32 { + return s.Position +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_linux.go b/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_linux.go 
new file mode 100644 index 000000000..6a752457a --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_linux.go @@ -0,0 +1,70 @@ +package fuse + +import "time" + +type attr struct { + Ino uint64 + Size uint64 + Blocks uint64 + Atime uint64 + Mtime uint64 + Ctime uint64 + AtimeNsec uint32 + MtimeNsec uint32 + CtimeNsec uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + // Blksize uint32 // Only in protocol 7.9 + // padding_ uint32 // Only in protocol 7.9 +} + +func (a *attr) Crtime() time.Time { + return time.Time{} +} + +func (a *attr) SetCrtime(s uint64, ns uint32) { + // Ignored on Linux. +} + +func (a *attr) SetFlags(f uint32) { + // Ignored on Linux. +} + +type setattrIn struct { + setattrInCommon +} + +func (in *setattrIn) BkupTime() time.Time { + return time.Time{} +} + +func (in *setattrIn) Chgtime() time.Time { + return time.Time{} +} + +func (in *setattrIn) Flags() uint32 { + return 0 +} + +func openFlags(flags uint32) OpenFlags { + // on amd64, the 32-bit O_LARGEFILE flag is always seen; + // on i386, the flag probably depends on the app + // requesting, but in any case should be utterly + // uninteresting to us here; our kernel protocol messages + // are not directly related to the client app's kernel + // API/ABI + flags &^= 0x8000 + + return OpenFlags(flags) +} + +type getxattrIn struct { + getxattrInCommon +} + +type setxattrIn struct { + setxattrInCommon +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_std.go b/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_std.go new file mode 100644 index 000000000..074cfd322 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_std.go @@ -0,0 +1 @@ +package fuse diff --git a/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_test.go b/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_test.go new file mode 100644 index 000000000..b3dd9244d --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/fuse_kernel_test.go @@ -0,0 +1,31 @@ +package fuse_test + +import ( + "os" + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse" +) + +func TestOpenFlagsAccmodeMask(t *testing.T) { + var f = fuse.OpenFlags(os.O_RDWR | os.O_SYNC) + if g, e := f&fuse.OpenAccessModeMask, fuse.OpenReadWrite; g != e { + t.Fatalf("OpenAccessModeMask behaves wrong: %v: %o != %o", f, g, e) + } + if f.IsReadOnly() { + t.Fatalf("IsReadOnly is wrong: %v", f) + } + if f.IsWriteOnly() { + t.Fatalf("IsWriteOnly is wrong: %v", f) + } + if !f.IsReadWrite() { + t.Fatalf("IsReadWrite is wrong: %v", f) + } +} + +func TestOpenFlagsString(t *testing.T) { + var f = fuse.OpenFlags(os.O_RDWR | os.O_SYNC | os.O_APPEND) + if g, e := f.String(), "OpenReadWrite+OpenAppend+OpenSync"; g != e { + t.Fatalf("OpenFlags.String: %q != %q", g, e) + } +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/fuseutil/fuseutil.go b/Godeps/_workspace/src/bazil.org/fuse/fuseutil/fuseutil.go new file mode 100644 index 000000000..c4fac4fa3 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/fuseutil/fuseutil.go @@ -0,0 +1,20 @@ +package fuseutil + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse" +) + +// HandleRead handles a read request assuming that data is the entire file content. +// It adjusts the amount returned in resp according to req.Offset and req.Size. 
+func HandleRead(req *fuse.ReadRequest, resp *fuse.ReadResponse, data []byte) { + if req.Offset >= int64(len(data)) { + data = nil + } else { + data = data[req.Offset:] + } + if len(data) > req.Size { + data = data[:req.Size] + } + n := copy(resp.Data[:req.Size], data) + resp.Data = resp.Data[:n] +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/hellofs/hello.go b/Godeps/_workspace/src/bazil.org/fuse/hellofs/hello.go new file mode 100644 index 000000000..68f7c9ab1 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/hellofs/hello.go @@ -0,0 +1,89 @@ +// Hellofs implements a simple "hello world" file system. +package main + +import ( + "flag" + "fmt" + "log" + "os" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs" + _ "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil" +) + +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, " %s MOUNTPOINT\n", os.Args[0]) + flag.PrintDefaults() +} + +func main() { + flag.Usage = Usage + flag.Parse() + + if flag.NArg() != 1 { + Usage() + os.Exit(2) + } + mountpoint := flag.Arg(0) + + c, err := fuse.Mount(mountpoint) + if err != nil { + log.Fatal(err) + } + defer c.Close() + + err = fs.Serve(c, FS{}) + if err != nil { + log.Fatal(err) + } + + // check if the mount process has an error to report + <-c.Ready + if err := c.MountError; err != nil { + log.Fatal(err) + } +} + +// FS implements the hello world file system. +type FS struct{} + +func (FS) Root() (fs.Node, fuse.Error) { + return Dir{}, nil +} + +// Dir implements both Node and Handle for the root directory. +type Dir struct{} + +func (Dir) Attr() fuse.Attr { + return fuse.Attr{Inode: 1, Mode: os.ModeDir | 0555} +} + +func (Dir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + if name == "hello" { + return File{}, nil + } + return nil, fuse.ENOENT +} + +var dirDirs = []fuse.Dirent{ + {Inode: 2, Name: "hello", Type: fuse.DT_File}, +} + +func (Dir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { + return dirDirs, nil +} + +// File implements both Node and Handle for the hello file. 
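// A minimal standalone sketch (not part of the vendored diff): it calls
// fuseutil.HandleRead above directly to show how the helper clamps the bytes
// it returns to req.Offset and req.Size. Only the exported fields the helper
// itself touches (ReadRequest.Offset, ReadRequest.Size, ReadResponse.Data) are
// relied on; the response buffer is given capacity req.Size because HandleRead
// slices resp.Data[:req.Size]. Import paths follow the vendored Godeps layout
// used by the hellofs example.
package main

import (
	"fmt"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse"
	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fuseutil"
)

func main() {
	content := []byte("hello, world\n")

	req := &fuse.ReadRequest{Offset: 7, Size: 4}
	resp := &fuse.ReadResponse{Data: make([]byte, 0, req.Size)}
	fuseutil.HandleRead(req, resp, content)
	fmt.Printf("%q\n", resp.Data) // "worl": 4 bytes starting at offset 7

	// Reading at or past the end yields an empty result rather than an error.
	req = &fuse.ReadRequest{Offset: int64(len(content)), Size: 4}
	resp = &fuse.ReadResponse{Data: make([]byte, 0, req.Size)}
	fuseutil.HandleRead(req, resp, content)
	fmt.Println(len(resp.Data)) // 0
}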
+type File struct{} + +const greeting = "hello, world\n" + +func (File) Attr() fuse.Attr { + return fuse.Attr{Inode: 2, Mode: 0444, Size: uint64(len(greeting))} +} + +func (File) ReadAll(intr fs.Intr) ([]byte, fuse.Error) { + return []byte(greeting), nil +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/mount_darwin.go b/Godeps/_workspace/src/bazil.org/fuse/mount_darwin.go new file mode 100644 index 000000000..d5ab2960a --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/mount_darwin.go @@ -0,0 +1,117 @@ +package fuse + +import ( + "bytes" + "errors" + "os" + "os/exec" + "strconv" + "syscall" +) + +var errNoAvail = errors.New("no available fuse devices") + +var errNotLoaded = errors.New("osxfusefs is not loaded") + +func loadOSXFUSE() error { + cmd := exec.Command("/Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs") + cmd.Dir = "/" + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + return err +} + +func openOSXFUSEDev() (*os.File, error) { + var f *os.File + var err error + for i := uint64(0); ; i++ { + path := "/dev/osxfuse" + strconv.FormatUint(i, 10) + f, err = os.OpenFile(path, os.O_RDWR, 0000) + if os.IsNotExist(err) { + if i == 0 { + // not even the first device was found -> fuse is not loaded + return nil, errNotLoaded + } + + // we've run out of kernel-provided devices + return nil, errNoAvail + } + + if err2, ok := err.(*os.PathError); ok && err2.Err == syscall.EBUSY { + // try the next one + continue + } + + if err != nil { + return nil, err + } + return f, nil + } +} + +func callMount(dir string, f *os.File, ready chan<- struct{}, errp *error) error { + bin := "/Library/Filesystems/osxfusefs.fs/Support/mount_osxfusefs" + cmd := exec.Command( + bin, + // Tell osxfuse-kext how large our buffer is. It must split + // writes larger than this into multiple writes. + // + // TODO add buffer reuse, bump this up significantly + // + // OSXFUSE seems to ignore InitResponse.MaxWrite, and uses + // this instead. 
+ "-o", "iosize="+strconv.FormatUint(maxWrite, 10), + // refers to fd passed in cmd.ExtraFiles + "3", + dir, + ) + cmd.ExtraFiles = []*os.File{f} + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "MOUNT_FUSEFS_CALL_BY_LIB=") + // TODO this is used for fs typenames etc, let app influence it + cmd.Env = append(cmd.Env, "MOUNT_FUSEFS_DAEMON_PATH="+bin) + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + + err := cmd.Start() + if err != nil { + return err + } + go func() { + err = cmd.Wait() + if err != nil { + if buf.Len() > 0 { + output := buf.Bytes() + output = bytes.TrimRight(output, "\n") + msg := err.Error() + ": " + string(output) + err = errors.New(msg) + } + } + *errp = err + close(ready) + }() + return err +} + +func mount(dir string, ready chan<- struct{}, errp *error) (*os.File, error) { + f, err := openOSXFUSEDev() + if err == errNotLoaded { + err = loadOSXFUSE() + if err != nil { + return nil, err + } + // try again + f, err = openOSXFUSEDev() + } + if err != nil { + return nil, err + } + err = callMount(dir, f, ready, errp) + if err != nil { + f.Close() + return nil, err + } + return f, nil +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/mount_linux.go b/Godeps/_workspace/src/bazil.org/fuse/mount_linux.go new file mode 100644 index 000000000..ef95d093d --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/mount_linux.go @@ -0,0 +1,67 @@ +package fuse + +import ( + "fmt" + "net" + "os" + "os/exec" + "syscall" +) + +func mount(dir string, ready chan<- struct{}, errp *error) (fusefd *os.File, err error) { + // linux mount is never delayed + close(ready) + + fds, err := syscall.Socketpair(syscall.AF_FILE, syscall.SOCK_STREAM, 0) + if err != nil { + return nil, fmt.Errorf("socketpair error: %v", err) + } + defer syscall.Close(fds[0]) + defer syscall.Close(fds[1]) + + cmd := exec.Command("fusermount", "--", dir) + cmd.Env = append(os.Environ(), "_FUSE_COMMFD=3") + + writeFile := os.NewFile(uintptr(fds[0]), "fusermount-child-writes") + defer writeFile.Close() + cmd.ExtraFiles = []*os.File{writeFile} + + out, err := cmd.CombinedOutput() + if len(out) > 0 || err != nil { + return nil, fmt.Errorf("fusermount: %q, %v", out, err) + } + + readFile := os.NewFile(uintptr(fds[1]), "fusermount-parent-reads") + defer readFile.Close() + c, err := net.FileConn(readFile) + if err != nil { + return nil, fmt.Errorf("FileConn from fusermount socket: %v", err) + } + defer c.Close() + + uc, ok := c.(*net.UnixConn) + if !ok { + return nil, fmt.Errorf("unexpected FileConn type; expected UnixConn, got %T", c) + } + + buf := make([]byte, 32) // expect 1 byte + oob := make([]byte, 32) // expect 24 bytes + _, oobn, _, _, err := uc.ReadMsgUnix(buf, oob) + scms, err := syscall.ParseSocketControlMessage(oob[:oobn]) + if err != nil { + return nil, fmt.Errorf("ParseSocketControlMessage: %v", err) + } + if len(scms) != 1 { + return nil, fmt.Errorf("expected 1 SocketControlMessage; got scms = %#v", scms) + } + scm := scms[0] + gotFds, err := syscall.ParseUnixRights(&scm) + if err != nil { + return nil, fmt.Errorf("syscall.ParseUnixRights: %v", err) + } + if len(gotFds) != 1 { + return nil, fmt.Errorf("wanted 1 fd; got %#v", gotFds) + } + f := os.NewFile(uintptr(gotFds[0]), "/dev/fuse") + return f, nil +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/syscallx/doc.go b/Godeps/_workspace/src/bazil.org/fuse/syscallx/doc.go new file mode 100644 index 000000000..8ceee43b0 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/syscallx/doc.go @@ -0,0 +1,13 @@ +// Package syscallx provides 
wrappers that make syscalls on various +// platforms more interoperable. +// +// The API intentionally omits the OS X-specific position and option +// arguments for extended attribute calls. +// +// Not having position means it might not be useful for accessing the +// resource fork. If that's needed by code inside fuse, a function +// with a different name may be added on the side. +// +// Options can be implemented with separate wrappers, in the style of +// Linux getxattr/lgetxattr/fgetxattr. +package syscallx diff --git a/Godeps/_workspace/src/bazil.org/fuse/syscallx/generate b/Godeps/_workspace/src/bazil.org/fuse/syscallx/generate new file mode 100644 index 000000000..476a282b1 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/syscallx/generate @@ -0,0 +1,34 @@ +#!/bin/sh +set -e + +mksys="$(go env GOROOT)/src/pkg/syscall/mksyscall.pl" + +fix() { + sed 's,^package syscall$,&x\nimport "syscall",' \ + | gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \ + | gofmt -r='Syscall6 -> syscall.Syscall6' \ + | gofmt -r='Syscall -> syscall.Syscall' \ + | gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \ + | gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \ + | gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \ + | gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \ + | gofmt -r='SYS_MSYNC -> syscall.SYS_MSYNC' +} + +cd "$(dirname "$0")" + +$mksys xattr_darwin.go \ + | fix \ + >xattr_darwin_amd64.go + +$mksys -l32 xattr_darwin.go \ + | fix \ + >xattr_darwin_386.go + +$mksys msync.go \ + | fix \ + >msync_amd64.go + +$mksys -l32 msync.go \ + | fix \ + >msync_386.go diff --git a/Godeps/_workspace/src/bazil.org/fuse/syscallx/msync.go b/Godeps/_workspace/src/bazil.org/fuse/syscallx/msync.go new file mode 100644 index 000000000..30737e6d4 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/syscallx/msync.go @@ -0,0 +1,9 @@ +package syscallx + +/* This is the source file for msync_*.go, to regenerate run + + ./generate + +*/ + +//sys Msync(b []byte, flags int) (err error) diff --git a/Godeps/_workspace/src/bazil.org/fuse/syscallx/msync_386.go b/Godeps/_workspace/src/bazil.org/fuse/syscallx/msync_386.go new file mode 100644 index 000000000..672599423 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/syscallx/msync_386.go @@ -0,0 +1,24 @@ +// mksyscall.pl -l32 msync.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package syscallx + +import "syscall" + +import "unsafe" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall(syscall.SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/syscallx/msync_amd64.go b/Godeps/_workspace/src/bazil.org/fuse/syscallx/msync_amd64.go new file mode 100644 index 000000000..0bbe1ab85 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/syscallx/msync_amd64.go @@ -0,0 +1,24 @@ +// mksyscall.pl msync.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package syscallx + +import "syscall" + +import "unsafe" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall(syscall.SYS_MSYNC, uintptr(_p0), 
uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/syscallx/syscallx.go b/Godeps/_workspace/src/bazil.org/fuse/syscallx/syscallx.go new file mode 100644 index 000000000..eb099129e --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/syscallx/syscallx.go @@ -0,0 +1,4 @@ +package syscallx + +// make us look more like package syscall, so mksyscall.pl output works +var _zero uintptr diff --git a/Godeps/_workspace/src/bazil.org/fuse/syscallx/syscallx_std.go b/Godeps/_workspace/src/bazil.org/fuse/syscallx/syscallx_std.go new file mode 100644 index 000000000..57353a53e --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/syscallx/syscallx_std.go @@ -0,0 +1,26 @@ +// +build !darwin + +package syscallx + +// This file just contains wrappers for platforms that already have +// the right stuff in stdlib. + +import ( + "syscall" +) + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + return syscall.Getxattr(path, attr, dest) +} + +func Listxattr(path string, dest []byte) (sz int, err error) { + return syscall.Listxattr(path, dest) +} + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return syscall.Setxattr(path, attr, data, flags) +} + +func Removexattr(path string, attr string) (err error) { + return syscall.Removexattr(path, attr) +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/syscallx/xattr_darwin.go b/Godeps/_workspace/src/bazil.org/fuse/syscallx/xattr_darwin.go new file mode 100644 index 000000000..b00f90203 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/syscallx/xattr_darwin.go @@ -0,0 +1,38 @@ +package syscallx + +/* This is the source file for syscallx_darwin_*.go, to regenerate run + + ./generate + +*/ + +// cannot use dest []byte here because OS X getxattr really wants a +// NULL to trigger size probing, size==0 is not enough +// +//sys getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var destp *byte + if len(dest) > 0 { + destp = &dest[0] + } + return getxattr(path, attr, destp, len(dest), 0, 0) +} + +//sys listxattr(path string, dest []byte, options int) (sz int, err error) + +func Listxattr(path string, dest []byte) (sz int, err error) { + return listxattr(path, dest, 0) +} + +//sys setxattr(path string, attr string, data []byte, position uint32, flags int) (err error) + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return setxattr(path, attr, data, 0, flags) +} + +//sys removexattr(path string, attr string, options int) (err error) + +func Removexattr(path string, attr string) (err error) { + return removexattr(path, attr, 0) +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/syscallx/xattr_darwin_386.go b/Godeps/_workspace/src/bazil.org/fuse/syscallx/xattr_darwin_386.go new file mode 100644 index 000000000..ffc357aef --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/syscallx/xattr_darwin_386.go @@ -0,0 +1,97 @@ +// mksyscall.pl -l32 xattr_darwin.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package syscallx + +import "syscall" + +import "unsafe" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + 
_p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + sz = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func listxattr(path string, dest []byte, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0) + sz = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setxattr(path string, attr string, data []byte, position uint32, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(position), uintptr(flags)) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func removexattr(path string, attr string, options int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/syscallx/xattr_darwin_amd64.go b/Godeps/_workspace/src/bazil.org/fuse/syscallx/xattr_darwin_amd64.go new file mode 100644 index 000000000..864c4c1d4 --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/syscallx/xattr_darwin_amd64.go @@ -0,0 +1,97 @@ +// mksyscall.pl xattr_darwin.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package syscallx + +import "syscall" + +import "unsafe" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + sz = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func listxattr(path string, dest []byte, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := 
syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0) + sz = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setxattr(path string, attr string, data []byte, position uint32, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(position), uintptr(flags)) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func removexattr(path string, attr string, options int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/unmount.go b/Godeps/_workspace/src/bazil.org/fuse/unmount.go new file mode 100644 index 000000000..ffe3f155c --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/unmount.go @@ -0,0 +1,6 @@ +package fuse + +// Unmount tries to unmount the filesystem mounted at dir. +func Unmount(dir string) error { + return unmount(dir) +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/unmount_linux.go b/Godeps/_workspace/src/bazil.org/fuse/unmount_linux.go new file mode 100644 index 000000000..088f0cfee --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/unmount_linux.go @@ -0,0 +1,21 @@ +package fuse + +import ( + "bytes" + "errors" + "os/exec" +) + +func unmount(dir string) error { + cmd := exec.Command("fusermount", "-u", dir) + output, err := cmd.CombinedOutput() + if err != nil { + if len(output) > 0 { + output = bytes.TrimRight(output, "\n") + msg := err.Error() + ": " + string(output) + err = errors.New(msg) + } + return err + } + return nil +} diff --git a/Godeps/_workspace/src/bazil.org/fuse/unmount_std.go b/Godeps/_workspace/src/bazil.org/fuse/unmount_std.go new file mode 100644 index 000000000..d6efe276f --- /dev/null +++ b/Godeps/_workspace/src/bazil.org/fuse/unmount_std.go @@ -0,0 +1,17 @@ +// +build !linux + +package fuse + +import ( + "os" + "syscall" +) + +func unmount(dir string) error { + err := syscall.Unmount(dir, 0) + if err != nil { + err = &os.PathError{Op: "unmount", Path: dir, Err: err} + return err + } + return nil +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE new file mode 100644 index 000000000..ab6b011a1 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
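// A minimal standalone sketch (not part of the vendored diff): it round-trips
// an extended attribute through the syscallx wrappers declared above
// (Setxattr, Getxattr, Removexattr). It assumes the temporary file lands on a
// filesystem with xattr support; the "user." prefix follows the Linux naming
// convention and is not required on OS X.
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/syscallx"
)

func main() {
	f, err := ioutil.TempFile("", "xattr-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	f.Close()

	const name = "user.demo"
	if err := syscallx.Setxattr(f.Name(), name, []byte("42"), 0); err != nil {
		log.Fatal(err) // e.g. ENOTSUP if the filesystem lacks xattr support
	}

	buf := make([]byte, 64)
	sz, err := syscallx.Getxattr(f.Name(), name, buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s = %q\n", name, buf[:sz])

	if err := syscallx.Removexattr(f.Name(), name); err != nil {
		log.Fatal(err)
	}
}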
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go new file mode 100644 index 000000000..50a0f2d09 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go @@ -0,0 +1,84 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) UUID { + uuid := NewUUID() + if uuid != nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCEPerson(Person, uint32(os.Getuid())) +func NewDCEPerson() UUID { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCEGroup(Group, uint32(os.Getgid())) +func NewDCEGroup() UUID { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID or false. +func (uuid UUID) Domain() (Domain, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return Domain(uuid[9]), true +} + +// Id returns the id for a Version 2 UUID or false. 
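// A minimal standalone sketch (not part of the vendored diff): it creates a
// DCE Security (Version 2) UUID for the current user with NewDCEPerson above
// and reads the domain and id back out with the accessors that follow. The
// import path mirrors the vendored Godeps layout.
package main

import (
	"fmt"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid"
)

func main() {
	u := uuid.NewDCEPerson() // shorthand for NewDCESecurity(Person, uint32(os.Getuid()))
	if u == nil {
		fmt.Println("could not generate a version 2 UUID")
		return
	}
	fmt.Println(u)

	if d, ok := u.Domain(); ok {
		fmt.Println("domain:", d) // Person
	}
	if id, ok := u.Id(); ok {
		fmt.Println("id:", id) // the caller's uid
	}
}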
+func (uuid UUID) Id() (uint32, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return binary.BigEndian.Uint32(uuid[0:4]), true +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go new file mode 100644 index 000000000..d8bd013e6 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go @@ -0,0 +1,8 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The uuid package generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services. +package uuid diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go new file mode 100644 index 000000000..cdd4192fd --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known Name Space IDs and UUIDs +var ( + NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") + NIL = Parse("00000000-0000-0000-0000-000000000000") +) + +// NewHash returns a new UUID dervied from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space) + h.Write([]byte(data)) + s := h.Sum(nil) + uuid := make([]byte, 16) + copy(uuid, s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go new file mode 100644 index 000000000..dd0a8ac18 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go @@ -0,0 +1,101 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
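// A minimal standalone sketch (not part of the vendored diff): name-based
// UUIDs with the NewSHA1/NewMD5 helpers above. They hash a well-known
// namespace UUID together with a name, so the same inputs always produce the
// same UUID.
package main

import (
	"fmt"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid"
)

func main() {
	name := []byte("example.com")

	v5 := uuid.NewSHA1(uuid.NameSpace_DNS, name) // Version 5 (SHA-1 based)
	v3 := uuid.NewMD5(uuid.NameSpace_DNS, name)  // Version 3 (MD5 based)
	fmt.Println(v5, version(v5))
	fmt.Println(v3, version(v3))

	// Deterministic: the same namespace and name hash to the same UUID.
	fmt.Println(uuid.Equal(v5, uuid.NewSHA1(uuid.NameSpace_DNS, name))) // true
}

func version(u uuid.UUID) uuid.Version {
	v, _ := u.Version()
	return v
}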
+ +package uuid + +import "net" + +var ( + interfaces []net.Interface // cached list of interfaces + ifname string // name of interface being used + nodeID []byte // hardware for version 1 UUIDs +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil && name != "" { + return false + } + } + + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + if setNodeID(ifs.HardwareAddr) { + ifname = ifs.Name + return true + } + } + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + if nodeID == nil { + nodeID = make([]byte, 6) + } + randomBits(nodeID) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + if nodeID == nil { + SetNodeInterface("") + } + nid := make([]byte, 6) + copy(nid, nodeID) + return nid +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if setNodeID(id) { + ifname = "user" + return true + } + return false +} + +func setNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + if nodeID == nil { + nodeID = make([]byte, 6) + } + copy(nodeID, id) + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + if len(uuid) != 16 { + return nil + } + node := make([]byte, 6) + copy(node, uuid[10:]) + return node +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go new file mode 100644 index 000000000..b9369c200 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go @@ -0,0 +1,132 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + mu sync.Mutex + lasttime uint64 // last time we returned + clock_seq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. 
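// A minimal standalone sketch (not part of the vendored diff): how the node-ID
// helpers above interact. SetNodeID overrides the interface-derived hardware
// address and reports the interface name as "user"; NodeID always hands back a
// copy, so callers cannot mutate the package state through it.
package main

import (
	"bytes"
	"fmt"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid"
)

func main() {
	// Let the package pick a hardware address (or a random one if none is usable).
	uuid.SetNodeInterface("")
	fmt.Println("interface:", uuid.NodeInterface())

	// Override it explicitly; only the first 6 bytes of the id are used.
	nid := []byte{1, 2, 3, 4, 5, 6}
	if !uuid.SetNodeID(nid) {
		fmt.Println("id shorter than 6 bytes, not set")
		return
	}
	fmt.Println("interface:", uuid.NodeInterface()) // user

	got := uuid.NodeID()
	fmt.Println(bytes.Equal(got, nid)) // true

	got[0] = 0xff                                // mutating the returned copy...
	fmt.Println(bytes.Equal(uuid.NodeID(), nid)) // ...leaves the package's node ID intact: true
}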
+func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// adjusts the clock sequence as needed. An error is returned if the current +// time cannot be determined. +func GetTime() (Time, error) { + defer mu.Unlock() + mu.Lock() + return getTime() +} + +func getTime() (Time, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clock_seq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence a new random +// clock sequence is generated the first time a clock sequence is requested by +// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated +// for +func ClockSequence() int { + defer mu.Unlock() + mu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clock_seq == 0 { + setClockSequence(-1) + } + return int(clock_seq & 0x3fff) +} + +// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer mu.Unlock() + mu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + old_seq := clock_seq + clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if old_seq != clock_seq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. It returns false if uuid is not valid. The time is only well defined +// for version 1 and 2 UUIDs. +func (uuid UUID) Time() (Time, bool) { + if len(uuid) != 16 { + return 0, false + } + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time), true +} + +// ClockSequence returns the clock sequence encoded in uuid. It returns false +// if uuid is not valid. The clock sequence is only well defined for version 1 +// and 2 UUIDs. +func (uuid UUID) ClockSequence() (int, bool) { + if len(uuid) != 16 { + return 0, false + } + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go new file mode 100644 index 000000000..de40b102c --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. 
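// A minimal standalone sketch (not part of the vendored diff): converting the
// 100ns-since-15-Oct-1582 Time values above into Go time.Time via UnixTime.
// The sample UUID and its expected instant come from the package's own tests
// further down (TestNodeAndTime).
package main

import (
	"fmt"
	"time"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid"
)

func main() {
	// The current instant expressed in the UUID epoch.
	now, err := uuid.GetTime()
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Unix(now.UnixTime()))

	// The timestamp embedded in an existing Version 1 UUID:
	// February 5, 1998 00:30:23.136364800 UTC.
	u := uuid.Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
	if ts, ok := u.Time(); ok {
		fmt.Println(time.Unix(ts.UnixTime()).UTC())
	}
}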
+func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = []byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts the the first two hex bytes of x into a byte. +func xtob(x string) (byte, bool) { + b1 := xvalues[x[0]] + b2 := xvalues[x[1]] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go new file mode 100644 index 000000000..2920fae63 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go @@ -0,0 +1,163 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID []byte + +// A Version represents a UUIDs version. +type Version byte + +// A Variant represents a UUIDs variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// New returns a new random (version 4) UUID as a string. It is a convenience +// function for NewRandom().String(). +func New() string { + return NewRandom().String() +} + +// Parse decodes s into a UUID or returns nil. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. 
+func Parse(s string) UUID { + if len(s) == 36+9 { + if strings.ToLower(s[:9]) != "urn:uuid:" { + return nil + } + s = s[9:] + } else if len(s) != 36 { + return nil + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return nil + } + uuid := make([]byte, 16) + for i, x := range []int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + if v, ok := xtob(s[x:]); !ok { + return nil + } else { + uuid[i] = v + } + } + return uuid +} + +// Equal returns true if uuid1 and uuid2 are equal. +func Equal(uuid1, uuid2 UUID) bool { + return bytes.Equal(uuid1, uuid2) +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + if uuid == nil || len(uuid) != 16 { + return "" + } + b := []byte(uuid) + return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", + b[:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + if uuid == nil || len(uuid) != 16 { + return "" + } + b := []byte(uuid) + return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x", + b[:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +// Variant returns the variant encoded in uuid. It returns Invalid if +// uuid is invalid. +func (uuid UUID) Variant() Variant { + if len(uuid) != 16 { + return Invalid + } + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } + panic("unreachable") +} + +// Version returns the verison of uuid. It returns false if uuid is not +// valid. +func (uuid UUID) Version() (Version, bool) { + if len(uuid) != 16 { + return 0, false + } + return Version(uuid[6] >> 4), true +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implents io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go new file mode 100644 index 000000000..417ebeb26 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go @@ -0,0 +1,390 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
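// A minimal standalone sketch (not part of the vendored diff): round-tripping
// the two textual forms accepted by Parse above and inspecting the variant and
// version bits with the accessors defined alongside it.
package main

import (
	"fmt"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid"
)

func main() {
	u := uuid.Parse("urn:uuid:f47ac10b-58cc-4372-a567-0e02b2c3d479")
	if u == nil {
		fmt.Println("not a valid UUID")
		return
	}

	fmt.Println(u.String())  // f47ac10b-58cc-4372-a567-0e02b2c3d479
	fmt.Println(u.URN())     // urn:uuid:f47ac10b-58cc-4372-a567-0e02b2c3d479
	fmt.Println(u.Variant()) // RFC4122: the top bits of byte 8 (0xa5) are 10
	if v, ok := u.Version(); ok {
		fmt.Println(v) // VERSION_4
	}

	// Parse signals malformed input by returning nil rather than an error.
	fmt.Println(uuid.Parse("f47ac10b-58cc-4372-a567") == nil) // true
}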
+ +package uuid + +import ( + "bytes" + "fmt" + "os" + "strings" + "testing" + "time" +) + +type test struct { + in string + version Version + variant Variant + isuuid bool +} + +var tests = []test{ + {"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true}, + {"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true}, + {"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true}, + {"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true}, + {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true}, + {"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true}, + {"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true}, + {"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true}, + {"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true}, + {"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true}, + {"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true}, + {"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true}, + {"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true}, + {"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true}, + {"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true}, + + {"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, + {"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true}, + {"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true}, + {"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true}, + {"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true}, + + {"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false}, + {"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false}, +} + +var constants = []struct { + c interface{} + name string +}{ + {Person, "Person"}, + {Group, "Group"}, + {Org, "Org"}, + {Invalid, "Invalid"}, + {RFC4122, "RFC4122"}, + {Reserved, "Reserved"}, + {Microsoft, "Microsoft"}, + {Future, "Future"}, + {Domain(17), "Domain17"}, + {Variant(42), "BadVariant42"}, +} + +func testTest(t *testing.T, in string, tt test) { + uuid := Parse(in) + if ok := (uuid != nil); ok != tt.isuuid { + t.Errorf("Parse(%s) got %v expected %v\b", in, ok, tt.isuuid) + } + if uuid == nil { + return + } + + if v := uuid.Variant(); v != tt.variant { + t.Errorf("Variant(%s) got %d expected %d\b", in, v, tt.variant) + } + if v, _ := uuid.Version(); v != tt.version { + t.Errorf("Version(%s) got %d expected %d\b", in, v, tt.version) + } +} + +func TestUUID(t *testing.T) { + for _, tt := range tests { + 
testTest(t, tt.in, tt) + testTest(t, strings.ToUpper(tt.in), tt) + } +} + +func TestConstants(t *testing.T) { + for x, tt := range constants { + v, ok := tt.c.(fmt.Stringer) + if !ok { + t.Errorf("%x: %v: not a stringer", x, v) + } else if s := v.String(); s != tt.name { + v, _ := tt.c.(int) + t.Errorf("%x: Constant %T:%d gives %q, expected %q\n", x, tt.c, v, s, tt.name) + } + } +} + +func TestRandomUUID(t *testing.T) { + m := make(map[string]bool) + for x := 1; x < 32; x++ { + uuid := NewRandom() + s := uuid.String() + if m[s] { + t.Errorf("NewRandom returned duplicated UUID %s\n", s) + } + m[s] = true + if v, _ := uuid.Version(); v != 4 { + t.Errorf("Random UUID of version %s\n", v) + } + if uuid.Variant() != RFC4122 { + t.Errorf("Random UUID is variant %d\n", uuid.Variant()) + } + } +} + +func TestNew(t *testing.T) { + m := make(map[string]bool) + for x := 1; x < 32; x++ { + s := New() + if m[s] { + t.Errorf("New returned duplicated UUID %s\n", s) + } + m[s] = true + uuid := Parse(s) + if uuid == nil { + t.Errorf("New returned %q which does not decode\n", s) + continue + } + if v, _ := uuid.Version(); v != 4 { + t.Errorf("Random UUID of version %s\n", v) + } + if uuid.Variant() != RFC4122 { + t.Errorf("Random UUID is variant %d\n", uuid.Variant()) + } + } +} + +func clockSeq(t *testing.T, uuid UUID) int { + seq, ok := uuid.ClockSequence() + if !ok { + t.Fatalf("%s: invalid clock sequence\n", uuid) + } + return seq +} + +func TestClockSeq(t *testing.T) { + // Fake time.Now for this test to return a monotonically advancing time; restore it at end. + defer func(orig func() time.Time) { timeNow = orig }(timeNow) + monTime := time.Now() + timeNow = func() time.Time { + monTime = monTime.Add(1 * time.Second) + return monTime + } + + SetClockSequence(-1) + uuid1 := NewUUID() + uuid2 := NewUUID() + + if clockSeq(t, uuid1) != clockSeq(t, uuid2) { + t.Errorf("clock sequence %d != %d\n", clockSeq(t, uuid1), clockSeq(t, uuid2)) + } + + SetClockSequence(-1) + uuid2 = NewUUID() + + // Just on the very off chance we generated the same sequence + // two times we try again. 
+ if clockSeq(t, uuid1) == clockSeq(t, uuid2) { + SetClockSequence(-1) + uuid2 = NewUUID() + } + if clockSeq(t, uuid1) == clockSeq(t, uuid2) { + t.Errorf("Duplicate clock sequence %d\n", clockSeq(t, uuid1)) + } + + SetClockSequence(0x1234) + uuid1 = NewUUID() + if seq := clockSeq(t, uuid1); seq != 0x1234 { + t.Errorf("%s: expected seq 0x1234 got 0x%04x\n", uuid1, seq) + } +} + +func TestCoding(t *testing.T) { + text := "7d444840-9dc0-11d1-b245-5ffdce74fad2" + urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2" + data := UUID{ + 0x7d, 0x44, 0x48, 0x40, + 0x9d, 0xc0, + 0x11, 0xd1, + 0xb2, 0x45, + 0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2, + } + if v := data.String(); v != text { + t.Errorf("%x: encoded to %s, expected %s\n", data, v, text) + } + if v := data.URN(); v != urn { + t.Errorf("%x: urn is %s, expected %s\n", data, v, urn) + } + + uuid := Parse(text) + if !Equal(uuid, data) { + t.Errorf("%s: decoded to %s, expected %s\n", text, uuid, data) + } +} + +func TestVersion1(t *testing.T) { + uuid1 := NewUUID() + uuid2 := NewUUID() + + if Equal(uuid1, uuid2) { + t.Errorf("%s:duplicate uuid\n", uuid1) + } + if v, _ := uuid1.Version(); v != 1 { + t.Errorf("%s: version %s expected 1\n", uuid1, v) + } + if v, _ := uuid2.Version(); v != 1 { + t.Errorf("%s: version %s expected 1\n", uuid2, v) + } + n1 := uuid1.NodeID() + n2 := uuid2.NodeID() + if !bytes.Equal(n1, n2) { + t.Errorf("Different nodes %x != %x\n", n1, n2) + } + t1, ok := uuid1.Time() + if !ok { + t.Errorf("%s: invalid time\n", uuid1) + } + t2, ok := uuid2.Time() + if !ok { + t.Errorf("%s: invalid time\n", uuid2) + } + q1, ok := uuid1.ClockSequence() + if !ok { + t.Errorf("%s: invalid clock sequence\n", uuid1) + } + q2, ok := uuid2.ClockSequence() + if !ok { + t.Errorf("%s: invalid clock sequence", uuid2) + } + + switch { + case t1 == t2 && q1 == q2: + t.Errorf("time stopped\n") + case t1 > t2 && q1 == q2: + t.Errorf("time reversed\n") + case t1 < t2 && q1 != q2: + t.Errorf("clock sequence chaned unexpectedly\n") + } +} + +func TestNodeAndTime(t *testing.T) { + // Time is February 5, 1998 12:30:23.136364800 AM GMT + + uuid := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2") + node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2} + + ts, ok := uuid.Time() + if ok { + c := time.Unix(ts.UnixTime()) + want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC) + if !c.Equal(want) { + t.Errorf("Got time %v, want %v", c, want) + } + } else { + t.Errorf("%s: bad time\n", uuid) + } + if !bytes.Equal(node, uuid.NodeID()) { + t.Errorf("Expected node %v got %v\n", node, uuid.NodeID()) + } +} + +func TestMD5(t *testing.T) { + uuid := NewMD5(NameSpace_DNS, []byte("python.org")).String() + want := "6fa459ea-ee8a-3ca4-894e-db77e160355e" + if uuid != want { + t.Errorf("MD5: got %q expected %q\n", uuid, want) + } +} + +func TestSHA1(t *testing.T) { + uuid := NewSHA1(NameSpace_DNS, []byte("python.org")).String() + want := "886313e1-3b8a-5372-9b90-0c9aee199e5d" + if uuid != want { + t.Errorf("SHA1: got %q expected %q\n", uuid, want) + } +} + +func TestNodeID(t *testing.T) { + nid := []byte{1, 2, 3, 4, 5, 6} + SetNodeInterface("") + s := NodeInterface() + if s == "" || s == "user" { + t.Errorf("NodeInterface %q after SetInteface\n", s) + } + node1 := NodeID() + if node1 == nil { + t.Errorf("NodeID nil after SetNodeInterface\n", s) + } + SetNodeID(nid) + s = NodeInterface() + if s != "user" { + t.Errorf("Expected NodeInterface %q got %q\n", "user", s) + } + node2 := NodeID() + if node2 == nil { + t.Errorf("NodeID nil after SetNodeID\n", s) + } + if 
bytes.Equal(node1, node2) { + t.Errorf("NodeID not changed after SetNodeID\n", s) + } else if !bytes.Equal(nid, node2) { + t.Errorf("NodeID is %x, expected %x\n", node2, nid) + } +} + +func testDCE(t *testing.T, name string, uuid UUID, domain Domain, id uint32) { + if uuid == nil { + t.Errorf("%s failed\n", name) + return + } + if v, _ := uuid.Version(); v != 2 { + t.Errorf("%s: %s: expected version 2, got %s\n", name, uuid, v) + return + } + if v, ok := uuid.Domain(); !ok || v != domain { + if !ok { + t.Errorf("%s: %d: Domain failed\n", name, uuid) + } else { + t.Errorf("%s: %s: expected domain %d, got %d\n", name, uuid, domain, v) + } + } + if v, ok := uuid.Id(); !ok || v != id { + if !ok { + t.Errorf("%s: %d: Id failed\n", name, uuid) + } else { + t.Errorf("%s: %s: expected id %d, got %d\n", name, uuid, id, v) + } + } +} + +func TestDCE(t *testing.T) { + testDCE(t, "NewDCESecurity", NewDCESecurity(42, 12345678), 42, 12345678) + testDCE(t, "NewDCEPerson", NewDCEPerson(), Person, uint32(os.Getuid())) + testDCE(t, "NewDCEGroup", NewDCEGroup(), Group, uint32(os.Getgid())) +} + +type badRand struct{} + +func (r badRand) Read(buf []byte) (int, error) { + for i, _ := range buf { + buf[i] = byte(i) + } + return len(buf), nil +} + +func TestBadRand(t *testing.T) { + SetRand(badRand{}) + uuid1 := New() + uuid2 := New() + if uuid1 != uuid2 { + t.Errorf("execpted duplicates, got %q and %q\n", uuid1, uuid2) + } + SetRand(nil) + uuid1 = New() + uuid2 = New() + if uuid1 == uuid2 { + t.Errorf("unexecpted duplicates, got %q\n", uuid1) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go new file mode 100644 index 000000000..63580044b --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go @@ -0,0 +1,41 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil. +func NewUUID() UUID { + if nodeID == nil { + SetNodeInterface("") + } + + now, err := GetTime() + if err != nil { + return nil + } + + uuid := make([]byte, 16) + + time_low := uint32(now & 0xffffffff) + time_mid := uint16((now >> 32) & 0xffff) + time_hi := uint16((now >> 48) & 0x0fff) + time_hi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], time_low) + binary.BigEndian.PutUint16(uuid[4:], time_mid) + binary.BigEndian.PutUint16(uuid[6:], time_hi) + binary.BigEndian.PutUint16(uuid[8:], clock_seq) + copy(uuid[10:], nodeID) + + return uuid +} diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go new file mode 100644 index 000000000..b3d4a368d --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go @@ -0,0 +1,25 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
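// A minimal standalone sketch (not part of the vendored diff): generating a
// time-based (Version 1) UUID with NewUUID above and pulling the embedded
// timestamp, clock sequence and node ID back out with the accessors defined
// earlier in the package.
package main

import (
	"fmt"
	"time"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid"
)

func main() {
	u := uuid.NewUUID()
	if u == nil {
		fmt.Println("no usable node ID or time source")
		return
	}
	fmt.Println(u)

	if v, _ := u.Version(); v != 1 {
		panic("expected a version 1 UUID")
	}
	if ts, ok := u.Time(); ok {
		fmt.Println("created:", time.Unix(ts.UnixTime()))
	}
	if seq, ok := u.ClockSequence(); ok {
		fmt.Println("clock sequence:", seq)
	}
	fmt.Printf("node id: %x\n", u.NodeID())
}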
+ +package uuid + +// Random returns a Random (Version 4) UUID or panics. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() UUID { + uuid := make([]byte, 16) + randomBits([]byte(uuid)) + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid +} diff --git a/Godeps/_workspace/src/code.google.com/p/go.crypto/sha3/keccakf.go b/Godeps/_workspace/src/code.google.com/p/go.crypto/sha3/keccakf.go new file mode 100644 index 000000000..76c0312a0 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.crypto/sha3/keccakf.go @@ -0,0 +1,165 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file implements the core Keccak permutation function necessary for computing SHA3. +// This is implemented in a separate file to allow for replacement by an optimized implementation. +// Nothing in this package is exported. +// For the detailed specification, refer to the Keccak web site (http://keccak.noekeon.org/). + +// rc stores the round constants for use in the ι step. +var rc = [...]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} + +// keccakF computes the complete Keccak-f function consisting of 24 rounds with a different +// constant (rc) in each round. This implementation fully unrolls the round function to avoid +// inner loops, as well as pre-calculating shift offsets. 
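The unrolled keccakF below writes every 64-bit rotation as t<<n ^ t>>(64-n). As a readability sketch of what its θ step computes (using math/bits, which the vendored file deliberately avoids; the helper name and loop form are illustrative):

package main

import (
	"fmt"
	"math/bits"
)

// theta restates the θ step performed by the unrolled loop below on the
// 25-lane state: column parities bc[0..4], then a per-column correction
// t = bc[(x+4)%5] ^ rotl1(bc[(x+1)%5]) XORed into every lane of column x.
// The t<<1 ^ t>>63 expressions in the vendored code are exactly a
// rotate-left by one.
func theta(a *[25]uint64) {
	var bc [5]uint64
	for x := 0; x < 5; x++ {
		bc[x] = a[x] ^ a[x+5] ^ a[x+10] ^ a[x+15] ^ a[x+20]
	}
	for x := 0; x < 5; x++ {
		t := bc[(x+4)%5] ^ bits.RotateLeft64(bc[(x+1)%5], 1)
		for y := 0; y < 25; y += 5 {
			a[x+y] ^= t
		}
	}
}

func main() {
	var state [25]uint64
	state[0] = 1
	theta(&state)
	// Neighbouring columns pick up the parity of column 0 (rotated for column 4).
	fmt.Printf("a[1]=%#x a[4]=%#x\n", state[1], state[4])
}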
+func keccakF(a *[numLanes]uint64) { + var t, bc0, bc1, bc2, bc3, bc4 uint64 + for _, roundConstant := range rc { + // θ step + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + t = bc4 ^ (bc1<<1 ^ bc1>>63) + a[0] ^= t + a[5] ^= t + a[10] ^= t + a[15] ^= t + a[20] ^= t + t = bc0 ^ (bc2<<1 ^ bc2>>63) + a[1] ^= t + a[6] ^= t + a[11] ^= t + a[16] ^= t + a[21] ^= t + t = bc1 ^ (bc3<<1 ^ bc3>>63) + a[2] ^= t + a[7] ^= t + a[12] ^= t + a[17] ^= t + a[22] ^= t + t = bc2 ^ (bc4<<1 ^ bc4>>63) + a[3] ^= t + a[8] ^= t + a[13] ^= t + a[18] ^= t + a[23] ^= t + t = bc3 ^ (bc0<<1 ^ bc0>>63) + a[4] ^= t + a[9] ^= t + a[14] ^= t + a[19] ^= t + a[24] ^= t + + // ρ and π steps + t = a[1] + t, a[10] = a[10], t<<1^t>>(64-1) + t, a[7] = a[7], t<<3^t>>(64-3) + t, a[11] = a[11], t<<6^t>>(64-6) + t, a[17] = a[17], t<<10^t>>(64-10) + t, a[18] = a[18], t<<15^t>>(64-15) + t, a[3] = a[3], t<<21^t>>(64-21) + t, a[5] = a[5], t<<28^t>>(64-28) + t, a[16] = a[16], t<<36^t>>(64-36) + t, a[8] = a[8], t<<45^t>>(64-45) + t, a[21] = a[21], t<<55^t>>(64-55) + t, a[24] = a[24], t<<2^t>>(64-2) + t, a[4] = a[4], t<<14^t>>(64-14) + t, a[15] = a[15], t<<27^t>>(64-27) + t, a[23] = a[23], t<<41^t>>(64-41) + t, a[19] = a[19], t<<56^t>>(64-56) + t, a[13] = a[13], t<<8^t>>(64-8) + t, a[12] = a[12], t<<25^t>>(64-25) + t, a[2] = a[2], t<<43^t>>(64-43) + t, a[20] = a[20], t<<62^t>>(64-62) + t, a[14] = a[14], t<<18^t>>(64-18) + t, a[22] = a[22], t<<39^t>>(64-39) + t, a[9] = a[9], t<<61^t>>(64-61) + t, a[6] = a[6], t<<20^t>>(64-20) + a[1] = t<<44 ^ t>>(64-44) + + // χ step + bc0 = a[0] + bc1 = a[1] + bc2 = a[2] + bc3 = a[3] + bc4 = a[4] + a[0] ^= bc2 &^ bc1 + a[1] ^= bc3 &^ bc2 + a[2] ^= bc4 &^ bc3 + a[3] ^= bc0 &^ bc4 + a[4] ^= bc1 &^ bc0 + bc0 = a[5] + bc1 = a[6] + bc2 = a[7] + bc3 = a[8] + bc4 = a[9] + a[5] ^= bc2 &^ bc1 + a[6] ^= bc3 &^ bc2 + a[7] ^= bc4 &^ bc3 + a[8] ^= bc0 &^ bc4 + a[9] ^= bc1 &^ bc0 + bc0 = a[10] + bc1 = a[11] + bc2 = a[12] + bc3 = a[13] + bc4 = a[14] + a[10] ^= bc2 &^ bc1 + a[11] ^= bc3 &^ bc2 + a[12] ^= bc4 &^ bc3 + a[13] ^= bc0 &^ bc4 + a[14] ^= bc1 &^ bc0 + bc0 = a[15] + bc1 = a[16] + bc2 = a[17] + bc3 = a[18] + bc4 = a[19] + a[15] ^= bc2 &^ bc1 + a[16] ^= bc3 &^ bc2 + a[17] ^= bc4 &^ bc3 + a[18] ^= bc0 &^ bc4 + a[19] ^= bc1 &^ bc0 + bc0 = a[20] + bc1 = a[21] + bc2 = a[22] + bc3 = a[23] + bc4 = a[24] + a[20] ^= bc2 &^ bc1 + a[21] ^= bc3 &^ bc2 + a[22] ^= bc4 &^ bc3 + a[23] ^= bc0 &^ bc4 + a[24] ^= bc1 &^ bc0 + + // ι step + a[0] ^= roundConstant + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/go.crypto/sha3/sha3.go b/Godeps/_workspace/src/code.google.com/p/go.crypto/sha3/sha3.go new file mode 100644 index 000000000..e1f9aa85c --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.crypto/sha3/sha3.go @@ -0,0 +1,213 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha3 implements the SHA3 hash algorithm (formerly called Keccak) chosen by NIST in 2012. +// This file provides a SHA3 implementation which implements the standard hash.Hash interface. +// Writing input data, including padding, and reading output data are computed in this file. +// Note that the current implementation can compute the hash of an integral number of bytes only. 
+// This is a consequence of the hash interface in which a buffer of bytes is passed in. +// The internals of the Keccak-f function are computed in keccakf.go. +// For the detailed specification, refer to the Keccak web site (http://keccak.noekeon.org/). +package sha3 + +import ( + "encoding/binary" + "hash" +) + +// laneSize is the size in bytes of each "lane" of the internal state of SHA3 (5 * 5 * 8). +// Note that changing this size would requires using a type other than uint64 to store each lane. +const laneSize = 8 + +// sliceSize represents the dimensions of the internal state, a square matrix of +// sliceSize ** 2 lanes. This is the size of both the "rows" and "columns" dimensions in the +// terminology of the SHA3 specification. +const sliceSize = 5 + +// numLanes represents the total number of lanes in the state. +const numLanes = sliceSize * sliceSize + +// stateSize is the size in bytes of the internal state of SHA3 (5 * 5 * WSize). +const stateSize = laneSize * numLanes + +// digest represents the partial evaluation of a checksum. +// Note that capacity, and not outputSize, is the critical security parameter, as SHA3 can output +// an arbitrary number of bytes for any given capacity. The Keccak proposal recommends that +// capacity = 2*outputSize to ensure that finding a collision of size outputSize requires +// O(2^{outputSize/2}) computations (the birthday lower bound). Future standards may modify the +// capacity/outputSize ratio to allow for more output with lower cryptographic security. +type digest struct { + a [numLanes]uint64 // main state of the hash + outputSize int // desired output size in bytes + capacity int // number of bytes to leave untouched during squeeze/absorb + absorbed int // number of bytes absorbed thus far +} + +// minInt returns the lesser of two integer arguments, to simplify the absorption routine. +func minInt(v1, v2 int) int { + if v1 <= v2 { + return v1 + } + return v2 +} + +// rate returns the number of bytes of the internal state which can be absorbed or squeezed +// in between calls to the permutation function. +func (d *digest) rate() int { + return stateSize - d.capacity +} + +// Reset clears the internal state by zeroing bytes in the state buffer. +// This can be skipped for a newly-created hash state; the default zero-allocated state is correct. +func (d *digest) Reset() { + d.absorbed = 0 + for i := range d.a { + d.a[i] = 0 + } +} + +// BlockSize, required by the hash.Hash interface, does not have a standard intepretation +// for a sponge-based construction like SHA3. We return the data rate: the number of bytes which +// can be absorbed per invocation of the permutation function. For Merkle-Damgård based hashes +// (ie SHA1, SHA2, MD5) the output size of the internal compression function is returned. +// We consider this to be roughly equivalent because it represents the number of bytes of output +// produced per cryptographic operation. +func (d *digest) BlockSize() int { return d.rate() } + +// Size returns the output size of the hash function in bytes. +func (d *digest) Size() int { + return d.outputSize +} + +// unalignedAbsorb is a helper function for Write, which absorbs data that isn't aligned with an +// 8-byte lane. This requires shifting the individual bytes into position in a uint64. 
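A quick arithmetic sketch of the rate() values implied by the definitions above (a 200-byte state minus capacity = 2 * output size); this is the per-permutation block size that the absorb and squeeze code below works in:

package main

import "fmt"

func main() {
	const stateSize = 25 * 8 // 25 lanes of 8 bytes, as defined above
	for _, n := range []int{224, 256, 384, 512} {
		outputSize := n / 8
		capacity := 2 * outputSize
		rate := stateSize - capacity
		fmt.Printf("Keccak-%d: output %3d B, capacity %3d B, rate %3d B per permutation\n",
			n, outputSize, capacity, rate)
	}
}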
+func (d *digest) unalignedAbsorb(p []byte) { + var t uint64 + for i := len(p) - 1; i >= 0; i-- { + t <<= 8 + t |= uint64(p[i]) + } + offset := (d.absorbed) % d.rate() + t <<= 8 * uint(offset%laneSize) + d.a[offset/laneSize] ^= t + d.absorbed += len(p) +} + +// Write "absorbs" bytes into the state of the SHA3 hash, updating as needed when the sponge +// "fills up" with rate() bytes. Since lanes are stored internally as type uint64, this requires +// converting the incoming bytes into uint64s using a little endian interpretation. This +// implementation is optimized for large, aligned writes of multiples of 8 bytes (laneSize). +// Non-aligned or uneven numbers of bytes require shifting and are slower. +func (d *digest) Write(p []byte) (int, error) { + // An initial offset is needed if the we aren't absorbing to the first lane initially. + offset := d.absorbed % d.rate() + toWrite := len(p) + + // The first lane may need to absorb unaligned and/or incomplete data. + if (offset%laneSize != 0 || len(p) < 8) && len(p) > 0 { + toAbsorb := minInt(laneSize-(offset%laneSize), len(p)) + d.unalignedAbsorb(p[:toAbsorb]) + p = p[toAbsorb:] + offset = (d.absorbed) % d.rate() + + // For every rate() bytes absorbed, the state must be permuted via the F Function. + if (d.absorbed)%d.rate() == 0 { + keccakF(&d.a) + } + } + + // This loop should absorb the bulk of the data into full, aligned lanes. + // It will call the update function as necessary. + for len(p) > 7 { + firstLane := offset / laneSize + lastLane := minInt(d.rate()/laneSize, firstLane+len(p)/laneSize) + + // This inner loop absorbs input bytes into the state in groups of 8, converted to uint64s. + for lane := firstLane; lane < lastLane; lane++ { + d.a[lane] ^= binary.LittleEndian.Uint64(p[:laneSize]) + p = p[laneSize:] + } + d.absorbed += (lastLane - firstLane) * laneSize + // For every rate() bytes absorbed, the state must be permuted via the F Function. + if (d.absorbed)%d.rate() == 0 { + keccakF(&d.a) + } + + offset = 0 + } + + // If there are insufficient bytes to fill the final lane, an unaligned absorption. + // This should always start at a correct lane boundary though, or else it would be caught + // by the uneven opening lane case above. + if len(p) > 0 { + d.unalignedAbsorb(p) + } + + return toWrite, nil +} + +// pad computes the SHA3 padding scheme based on the number of bytes absorbed. +// The padding is a 1 bit, followed by an arbitrary number of 0s and then a final 1 bit, such that +// the input bits plus padding bits are a multiple of rate(). Adding the padding simply requires +// xoring an opening and closing bit into the appropriate lanes. +func (d *digest) pad() { + offset := d.absorbed % d.rate() + // The opening pad bit must be shifted into position based on the number of bytes absorbed + padOpenLane := offset / laneSize + d.a[padOpenLane] ^= 0x0000000000000001 << uint(8*(offset%laneSize)) + // The closing padding bit is always in the last position + padCloseLane := (d.rate() / laneSize) - 1 + d.a[padCloseLane] ^= 0x8000000000000000 +} + +// finalize prepares the hash to output data by padding and one final permutation of the state. +func (d *digest) finalize() { + d.pad() + keccakF(&d.a) +} + +// squeeze outputs an arbitrary number of bytes from the hash state. +// Squeezing can require multiple calls to the F function (one per rate() bytes squeezed), +// although this is not the case for standard SHA3 parameters. This implementation only supports +// squeezing a single time, subsequent squeezes may lose alignment. 
Future implementations +// may wish to support multiple squeeze calls, for example to support use as a PRNG. +func (d *digest) squeeze(in []byte, toSqueeze int) []byte { + // Because we read in blocks of laneSize, we need enough room to read + // an integral number of lanes + needed := toSqueeze + (laneSize-toSqueeze%laneSize)%laneSize + if cap(in)-len(in) < needed { + newIn := make([]byte, len(in), len(in)+needed) + copy(newIn, in) + in = newIn + } + out := in[len(in) : len(in)+needed] + + for len(out) > 0 { + for i := 0; i < d.rate() && len(out) > 0; i += laneSize { + binary.LittleEndian.PutUint64(out[:], d.a[i/laneSize]) + out = out[laneSize:] + } + if len(out) > 0 { + keccakF(&d.a) + } + } + return in[:len(in)+toSqueeze] // Re-slice in case we wrote extra data. +} + +// Sum applies padding to the hash state and then squeezes out the desired nubmer of output bytes. +func (d *digest) Sum(in []byte) []byte { + // Make a copy of the original hash so that caller can keep writing and summing. + dup := *d + dup.finalize() + return dup.squeeze(in, dup.outputSize) +} + +// The NewKeccakX constructors enable initializing a hash in any of the four recommend sizes +// from the Keccak specification, all of which set capacity=2*outputSize. Note that the final +// NIST standard for SHA3 may specify different input/output lengths. +// The output size is indicated in bits but converted into bytes internally. +func NewKeccak224() hash.Hash { return &digest{outputSize: 224 / 8, capacity: 2 * 224 / 8} } +func NewKeccak256() hash.Hash { return &digest{outputSize: 256 / 8, capacity: 2 * 256 / 8} } +func NewKeccak384() hash.Hash { return &digest{outputSize: 384 / 8, capacity: 2 * 384 / 8} } +func NewKeccak512() hash.Hash { return &digest{outputSize: 512 / 8, capacity: 2 * 512 / 8} } diff --git a/Godeps/_workspace/src/code.google.com/p/go.crypto/sha3/sha3_test.go b/Godeps/_workspace/src/code.google.com/p/go.crypto/sha3/sha3_test.go new file mode 100644 index 000000000..05e7c9589 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.crypto/sha3/sha3_test.go @@ -0,0 +1,270 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// These tests are a subset of those provided by the Keccak web site(http://keccak.noekeon.org/). + +import ( + "bytes" + "encoding/hex" + "fmt" + "hash" + "strings" + "testing" +) + +// testDigests maintains a digest state of each standard type. +var testDigests = map[string]*digest{ + "Keccak224": {outputSize: 224 / 8, capacity: 2 * 224 / 8}, + "Keccak256": {outputSize: 256 / 8, capacity: 2 * 256 / 8}, + "Keccak384": {outputSize: 384 / 8, capacity: 2 * 384 / 8}, + "Keccak512": {outputSize: 512 / 8, capacity: 2 * 512 / 8}, +} + +// testVector represents a test input and expected outputs from multiple algorithm variants. +type testVector struct { + desc string + input []byte + repeat int // input will be concatenated the input this many times. + want map[string]string +} + +// decodeHex converts an hex-encoded string into a raw byte string. +func decodeHex(s string) []byte { + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +// shortTestVectors stores a series of short testVectors. +// Inputs of 8, 248, and 264 bits from http://keccak.noekeon.org/ are included below. +// The standard defines additional test inputs of all sizes between 0 and 2047 bits. 
+// Because the current implementation can only handle an integral number of bytes, +// most of the standard test inputs can't be used. +var shortKeccakTestVectors = []testVector{ + { + desc: "short-8b", + input: decodeHex("CC"), + repeat: 1, + want: map[string]string{ + "Keccak224": "A9CAB59EB40A10B246290F2D6086E32E3689FAF1D26B470C899F2802", + "Keccak256": "EEAD6DBFC7340A56CAEDC044696A168870549A6A7F6F56961E84A54BD9970B8A", + "Keccak384": "1B84E62A46E5A201861754AF5DC95C4A1A69CAF4A796AE405680161E29572641F5FA1E8641D7958336EE7B11C58F73E9", + "Keccak512": "8630C13CBD066EA74BBE7FE468FEC1DEE10EDC1254FB4C1B7C5FD69B646E44160B8CE01D05A0908CA790DFB080F4B513BC3B6225ECE7A810371441A5AC666EB9", + }, + }, + { + desc: "short-248b", + input: decodeHex("84FB51B517DF6C5ACCB5D022F8F28DA09B10232D42320FFC32DBECC3835B29"), + repeat: 1, + want: map[string]string{ + "Keccak224": "81AF3A7A5BD4C1F948D6AF4B96F93C3B0CF9C0E7A6DA6FCD71EEC7F6", + "Keccak256": "D477FB02CAAA95B3280EC8EE882C29D9E8A654B21EF178E0F97571BF9D4D3C1C", + "Keccak384": "503DCAA4ADDA5A9420B2E436DD62D9AB2E0254295C2982EF67FCE40F117A2400AB492F7BD5D133C6EC2232268BC27B42", + "Keccak512": "9D8098D8D6EDBBAA2BCFC6FB2F89C3EAC67FEC25CDFE75AA7BD570A648E8C8945FF2EC280F6DCF73386109155C5BBC444C707BB42EAB873F5F7476657B1BC1A8", + }, + }, + { + desc: "short-264b", + input: decodeHex("DE8F1B3FAA4B7040ED4563C3B8E598253178E87E4D0DF75E4FF2F2DEDD5A0BE046"), + repeat: 1, + want: map[string]string{ + "Keccak224": "F217812E362EC64D4DC5EACFABC165184BFA456E5C32C2C7900253D0", + "Keccak256": "E78C421E6213AFF8DE1F025759A4F2C943DB62BBDE359C8737E19B3776ED2DD2", + "Keccak384": "CF38764973F1EC1C34B5433AE75A3AAD1AAEF6AB197850C56C8617BCD6A882F6666883AC17B2DCCDBAA647075D0972B5", + "Keccak512": "9A7688E31AAF40C15575FC58C6B39267AAD3722E696E518A9945CF7F7C0FEA84CB3CB2E9F0384A6B5DC671ADE7FB4D2B27011173F3EEEAF17CB451CF26542031", + }, + }, +} + +// longTestVectors stores longer testVectors (currently only one). +// The computed test vector is 64 MiB long and is a truncated version of the +// ExtremelyLongMsgKAT taken from http://keccak.noekeon.org/. +var longKeccakTestVectors = []testVector{ + { + desc: "long-64MiB", + input: []byte("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno"), + repeat: 1024 * 1024, + want: map[string]string{ + "Keccak224": "50E35E40980FEEFF1EA490957B0E970257F75EA0D410EE0F0B8A7A58", + "Keccak256": "5015A4935F0B51E091C6550A94DCD262C08998232CCAA22E7F0756DEAC0DC0D0", + "Keccak384": "7907A8D0FAA7BC6A90FE14C6C958C956A0877E751455D8F13ACDB96F144B5896E716C06EC0CB56557A94EF5C3355F6F3", + "Keccak512": "3EC327D6759F769DEB74E80CA70C831BC29CAB048A4BF4190E4A1DD5C6507CF2B4B58937FDE81D36014E7DFE1B1DD8B0F27CB7614F9A645FEC114F1DAAEFC056", + }, + }, +} + +// TestKeccakVectors checks that correct output is produced for a set of known testVectors. +func TestKeccakVectors(t *testing.T) { + testCases := append([]testVector{}, shortKeccakTestVectors...) + if !testing.Short() { + testCases = append(testCases, longKeccakTestVectors...) + } + for _, tc := range testCases { + for alg, want := range tc.want { + d := testDigests[alg] + d.Reset() + for i := 0; i < tc.repeat; i++ { + d.Write(tc.input) + } + got := strings.ToUpper(hex.EncodeToString(d.Sum(nil))) + if got != want { + t.Errorf("%s, alg=%s\ngot %q, want %q", tc.desc, alg, got, want) + } + } + } +} + +// dumpState is a debugging function to pretty-print the internal state of the hash. 
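A sketch of checking the short-8b vector above through the exported constructor; the import path shown is the upstream go.crypto one, whereas code in this tree would import the copy vendored under Godeps/_workspace:

package main

import (
	"encoding/hex"
	"fmt"
	"strings"

	"code.google.com/p/go.crypto/sha3"
)

func main() {
	// Hash the single byte 0xCC and compare against the Keccak256 entry of
	// the short-8b test vector above.
	d := sha3.NewKeccak256()
	d.Write([]byte{0xCC})
	got := strings.ToUpper(hex.EncodeToString(d.Sum(nil)))
	want := "EEAD6DBFC7340A56CAEDC044696A168870549A6A7F6F56961E84A54BD9970B8A"
	fmt.Println(got == want, got)
}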
+func (d *digest) dumpState() { + fmt.Printf("SHA3 hash, %d B output, %d B capacity (%d B rate)\n", d.outputSize, d.capacity, d.rate()) + fmt.Printf("Internal state after absorbing %d B:\n", d.absorbed) + + for x := 0; x < sliceSize; x++ { + for y := 0; y < sliceSize; y++ { + fmt.Printf("%v, ", d.a[x*sliceSize+y]) + } + fmt.Println("") + } +} + +// TestUnalignedWrite tests that writing data in an arbitrary pattern with small input buffers. +func TestUnalignedWrite(t *testing.T) { + buf := sequentialBytes(0x10000) + for alg, d := range testDigests { + d.Reset() + d.Write(buf) + want := d.Sum(nil) + d.Reset() + for i := 0; i < len(buf); { + // Cycle through offsets which make a 137 byte sequence. + // Because 137 is prime this sequence should exercise all corner cases. + offsets := [17]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1} + for _, j := range offsets { + j = minInt(j, len(buf)-i) + d.Write(buf[i : i+j]) + i += j + } + } + got := d.Sum(nil) + if !bytes.Equal(got, want) { + t.Errorf("Unaligned writes, alg=%s\ngot %q, want %q", alg, got, want) + } + } +} + +func TestAppend(t *testing.T) { + d := NewKeccak224() + + for capacity := 2; capacity < 64; capacity += 64 { + // The first time around the loop, Sum will have to reallocate. + // The second time, it will not. + buf := make([]byte, 2, capacity) + d.Reset() + d.Write([]byte{0xcc}) + buf = d.Sum(buf) + expected := "0000A9CAB59EB40A10B246290F2D6086E32E3689FAF1D26B470C899F2802" + if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected { + t.Errorf("got %s, want %s", got, expected) + } + } +} + +func TestAppendNoRealloc(t *testing.T) { + buf := make([]byte, 1, 200) + d := NewKeccak224() + d.Write([]byte{0xcc}) + buf = d.Sum(buf) + expected := "00A9CAB59EB40A10B246290F2D6086E32E3689FAF1D26B470C899F2802" + if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected { + t.Errorf("got %s, want %s", got, expected) + } +} + +// sequentialBytes produces a buffer of size consecutive bytes 0x00, 0x01, ..., used for testing. +func sequentialBytes(size int) []byte { + result := make([]byte, size) + for i := range result { + result[i] = byte(i) + } + return result +} + +// benchmarkBlockWrite tests the speed of writing data and never calling the permutation function. +func benchmarkBlockWrite(b *testing.B, d *digest) { + b.StopTimer() + d.Reset() + // Write all but the last byte of a block, to ensure that the permutation is not called. + data := sequentialBytes(d.rate() - 1) + b.SetBytes(int64(len(data))) + b.StartTimer() + for i := 0; i < b.N; i++ { + d.absorbed = 0 // Reset absorbed to avoid ever calling the permutation function + d.Write(data) + } + b.StopTimer() + d.Reset() +} + +// BenchmarkPermutationFunction measures the speed of the permutation function with no input data. +func BenchmarkPermutationFunction(b *testing.B) { + b.SetBytes(int64(stateSize)) + var lanes [numLanes]uint64 + for i := 0; i < b.N; i++ { + keccakF(&lanes) + } +} + +// BenchmarkSingleByteWrite tests the latency from writing a single byte +func BenchmarkSingleByteWrite(b *testing.B) { + b.StopTimer() + d := testDigests["Keccak512"] + d.Reset() + data := sequentialBytes(1) //1 byte buffer + b.SetBytes(int64(d.rate()) - 1) + b.StartTimer() + for i := 0; i < b.N; i++ { + d.absorbed = 0 // Reset absorbed to avoid ever calling the permutation function + + // Write all but the last byte of a block, one byte at a time. 
+ for j := 0; j < d.rate()-1; j++ { + d.Write(data) + } + } + b.StopTimer() + d.Reset() +} + +// BenchmarkSingleByteX measures the block write speed for each size of the digest. +func BenchmarkBlockWrite512(b *testing.B) { benchmarkBlockWrite(b, testDigests["Keccak512"]) } +func BenchmarkBlockWrite384(b *testing.B) { benchmarkBlockWrite(b, testDigests["Keccak384"]) } +func BenchmarkBlockWrite256(b *testing.B) { benchmarkBlockWrite(b, testDigests["Keccak256"]) } +func BenchmarkBlockWrite224(b *testing.B) { benchmarkBlockWrite(b, testDigests["Keccak224"]) } + +// benchmarkBulkHash tests the speed to hash a 16 KiB buffer. +func benchmarkBulkHash(b *testing.B, h hash.Hash) { + b.StopTimer() + h.Reset() + size := 1 << 14 + data := sequentialBytes(size) + b.SetBytes(int64(size)) + b.StartTimer() + + var digest []byte + for i := 0; i < b.N; i++ { + h.Write(data) + digest = h.Sum(digest[:0]) + } + b.StopTimer() + h.Reset() +} + +// benchmarkBulkKeccakX test the speed to hash a 16 KiB buffer by calling benchmarkBulkHash. +func BenchmarkBulkKeccak512(b *testing.B) { benchmarkBulkHash(b, NewKeccak512()) } +func BenchmarkBulkKeccak384(b *testing.B) { benchmarkBulkHash(b, NewKeccak384()) } +func BenchmarkBulkKeccak256(b *testing.B) { benchmarkBulkHash(b, NewKeccak256()) } +func BenchmarkBulkKeccak224(b *testing.B) { benchmarkBulkHash(b, NewKeccak224()) } diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/Makefile b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/Makefile new file mode 100644 index 000000000..e99b839a7 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/Makefile @@ -0,0 +1,40 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# http://code.google.com/p/goprotobuf/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install && cd testdata && make diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/all_test.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/all_test.go new file mode 100644 index 000000000..b8e9fa004 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/all_test.go @@ -0,0 +1,1948 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "math/rand" + "reflect" + "runtime/debug" + "strings" + "testing" + "time" + + . "./testdata" + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" +) + +var globalO *Buffer + +func old() *Buffer { + if globalO == nil { + globalO = NewBuffer(nil) + } + globalO.Reset() + return globalO +} + +func equalbytes(b1, b2 []byte, t *testing.T) { + if len(b1) != len(b2) { + t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) + return + } + for i := 0; i < len(b1); i++ { + if b1[i] != b2[i] { + t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) + } + } +} + +func initGoTestField() *GoTestField { + f := new(GoTestField) + f.Label = String("label") + f.Type = String("type") + return f +} + +// These are all structurally equivalent but the tag numbers differ. +// (It's remarkable that required, optional, and repeated all have +// 8 letters.) 
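The old() helper above amortizes allocations by keeping a single package-level Buffer and calling Reset() before each use. A sketch of the same pattern outside the test (the import path here is the upstream gogoprotobuf one; this tree imports the Godeps/_workspace copy, as the test file above does, and marshalReusing is an illustrative name):

package main

import "code.google.com/p/gogoprotobuf/proto"

// scratch plays the role of globalO above: one reusable encode buffer.
var scratch = proto.NewBuffer(nil)

// marshalReusing resets the shared buffer before encoding m, so repeated
// encodes reuse the backing array instead of allocating a new Buffer each
// time. The returned slice aliases scratch and is only valid until the next
// call.
func marshalReusing(m proto.Message) ([]byte, error) {
	scratch.Reset()
	if err := scratch.Marshal(m); err != nil {
		return nil, err
	}
	return scratch.Bytes(), nil
}

func main() {
	// With any generated message (e.g. the GoTestField type used above):
	//   data, err := marshalReusing(msg)
}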
+func initGoTest_RequiredGroup() *GoTest_RequiredGroup { + return &GoTest_RequiredGroup{ + RequiredField: String("required"), + } +} + +func initGoTest_OptionalGroup() *GoTest_OptionalGroup { + return &GoTest_OptionalGroup{ + RequiredField: String("optional"), + } +} + +func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { + return &GoTest_RepeatedGroup{ + RequiredField: String("repeated"), + } +} + +func initGoTest(setdefaults bool) *GoTest { + pb := new(GoTest) + if setdefaults { + pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) + pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) + pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) + pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) + pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) + pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) + pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) + pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) + pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) + pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) + pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted + pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) + pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) + } + + pb.Kind = GoTest_TIME.Enum() + pb.RequiredField = initGoTestField() + pb.F_BoolRequired = Bool(true) + pb.F_Int32Required = Int32(3) + pb.F_Int64Required = Int64(6) + pb.F_Fixed32Required = Uint32(32) + pb.F_Fixed64Required = Uint64(64) + pb.F_Uint32Required = Uint32(3232) + pb.F_Uint64Required = Uint64(6464) + pb.F_FloatRequired = Float32(3232) + pb.F_DoubleRequired = Float64(6464) + pb.F_StringRequired = String("string") + pb.F_BytesRequired = []byte("bytes") + pb.F_Sint32Required = Int32(-32) + pb.F_Sint64Required = Int64(-64) + pb.Requiredgroup = initGoTest_RequiredGroup() + + return pb +} + +func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { + data := b.Bytes() + ld := len(data) + ls := len(s) / 2 + + fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) + + // find the interesting spot - n + n := ls + if ld < ls { + n = ld + } + j := 0 + for i := 0; i < n; i++ { + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + if data[i] == bs { + continue + } + n = i + break + } + l := n - 10 + if l < 0 { + l = 0 + } + h := n + 10 + + // find the interesting spot - n + fmt.Printf("is[%d]:", l) + for i := l; i < h; i++ { + if i >= ld { + fmt.Printf(" --") + continue + } + fmt.Printf(" %.2x", data[i]) + } + fmt.Printf("\n") + + fmt.Printf("sb[%d]:", l) + for i := l; i < h; i++ { + if i >= ls { + fmt.Printf(" --") + continue + } + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + fmt.Printf(" %.2x", bs) + } + fmt.Printf("\n") + + t.Fail() + + // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) + // Print the output in a partially-decoded format; can + // be helpful when updating the test. It produces the output + // that is pasted, with minor edits, into the argument to verify(). 
+ // data := b.Bytes() + // nesting := 0 + // for b.Len() > 0 { + // start := len(data) - b.Len() + // var u uint64 + // u, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // wire := u & 0x7 + // tag := u >> 3 + // switch wire { + // case WireVarint: + // v, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed32: + // v, err := DecodeFixed32(b) + // if err != nil { + // fmt.Printf("decode error on fixed32:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed64: + // v, err := DecodeFixed64(b) + // if err != nil { + // fmt.Printf("decode error on fixed64:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireBytes: + // nb, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // after_tag := len(data) - b.Len() + // str := make([]byte, nb) + // _, err = b.Read(str) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", + // data[start:after_tag], str, tag, wire) + // case WireStartGroup: + // nesting++ + // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // case WireEndGroup: + // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // nesting-- + // default: + // fmt.Printf("unrecognized wire type %d\n", wire) + // return + // } + // } +} + +func hex(c uint8) uint8 { + if '0' <= c && c <= '9' { + return c - '0' + } + if 'a' <= c && c <= 'f' { + return 10 + c - 'a' + } + if 'A' <= c && c <= 'F' { + return 10 + c - 'A' + } + return 0 +} + +func equal(b []byte, s string, t *testing.T) bool { + if 2*len(b) != len(s) { + // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) + fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) + return false + } + for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { + x := hex(s[j])*16 + hex(s[j+1]) + if b[i] != x { + // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) + fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) + return false + } + } + return true +} + +func overify(t *testing.T, pb *GoTest, expected string) { + o := old() + err := o.Marshal(pb) + if err != nil { + fmt.Printf("overify marshal-1 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 1", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. 
+ pbd := new(GoTest) + err = o.Unmarshal(pbd) + if err != nil { + t.Fatalf("overify unmarshal err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + o.Reset() + err = o.Marshal(pbd) + if err != nil { + t.Errorf("overify marshal-2 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 2", o.Bytes()) + t.Fatalf("string = %s", expected) + } +} + +// Simple tests for numeric encode/decode primitives (varint, etc.) +func TestNumericPrimitives(t *testing.T) { + for i := uint64(0); i < 1e6; i += 111 { + o := old() + if o.EncodeVarint(i) != nil { + t.Error("EncodeVarint") + break + } + x, e := o.DecodeVarint() + if e != nil { + t.Fatal("DecodeVarint") + } + if x != i { + t.Fatal("varint decode fail:", i, x) + } + + o = old() + if o.EncodeFixed32(i) != nil { + t.Fatal("encFixed32") + } + x, e = o.DecodeFixed32() + if e != nil { + t.Fatal("decFixed32") + } + if x != i { + t.Fatal("fixed32 decode fail:", i, x) + } + + o = old() + if o.EncodeFixed64(i*1234567) != nil { + t.Error("encFixed64") + break + } + x, e = o.DecodeFixed64() + if e != nil { + t.Error("decFixed64") + break + } + if x != i*1234567 { + t.Error("fixed64 decode fail:", i*1234567, x) + break + } + + o = old() + i32 := int32(i - 12345) + if o.EncodeZigzag32(uint64(i32)) != nil { + t.Fatal("EncodeZigzag32") + } + x, e = o.DecodeZigzag32() + if e != nil { + t.Fatal("DecodeZigzag32") + } + if x != uint64(uint32(i32)) { + t.Fatal("zigzag32 decode fail:", i32, x) + } + + o = old() + i64 := int64(i - 12345) + if o.EncodeZigzag64(uint64(i64)) != nil { + t.Fatal("EncodeZigzag64") + } + x, e = o.DecodeZigzag64() + if e != nil { + t.Fatal("DecodeZigzag64") + } + if x != uint64(i64) { + t.Fatal("zigzag64 decode fail:", i64, x) + } + } +} + +// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. +type fakeMarshaler struct { + b []byte + err error +} + +func (f fakeMarshaler) Marshal() ([]byte, error) { + return f.b, f.err +} + +func (f fakeMarshaler) String() string { + return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) +} + +func (f fakeMarshaler) ProtoMessage() {} + +func (f fakeMarshaler) Reset() {} + +// Simple tests for proto messages that implement the Marshaler interface. +func TestMarshalerEncoding(t *testing.T) { + tests := []struct { + name string + m Message + want []byte + wantErr error + }{ + { + name: "Marshaler that fails", + m: fakeMarshaler{ + err: errors.New("some marshal err"), + b: []byte{5, 6, 7}, + }, + // Since there's an error, nothing should be written to buffer. 
+ want: nil, + wantErr: errors.New("some marshal err"), + }, + { + name: "Marshaler that succeeds", + m: fakeMarshaler{ + b: []byte{0, 1, 2, 3, 4, 127, 255}, + }, + want: []byte{0, 1, 2, 3, 4, 127, 255}, + wantErr: nil, + }, + } + for _, test := range tests { + b := NewBuffer(nil) + err := b.Marshal(test.m) + if !reflect.DeepEqual(test.wantErr, err) { + t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) + } + if !reflect.DeepEqual(test.want, b.Bytes()) { + t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) + } + } +} + +// Simple tests for bytes +func TestBytesPrimitives(t *testing.T) { + o := old() + bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} + if o.EncodeRawBytes(bytes) != nil { + t.Error("EncodeRawBytes") + } + decb, e := o.DecodeRawBytes(false) + if e != nil { + t.Error("DecodeRawBytes") + } + equalbytes(bytes, decb, t) +} + +// Simple tests for strings +func TestStringPrimitives(t *testing.T) { + o := old() + s := "now is the time" + if o.EncodeStringBytes(s) != nil { + t.Error("enc_string") + } + decs, e := o.DecodeStringBytes() + if e != nil { + t.Error("dec_string") + } + if s != decs { + t.Error("string encode/decode fail:", s, decs) + } +} + +// Do we catch the "required bit not set" case? +func TestRequiredBit(t *testing.T) { + o := old() + pb := new(GoTest) + err := o.Marshal(pb) + if err == nil { + t.Error("did not catch missing required fields") + } else if strings.Index(err.Error(), "Kind") < 0 { + t.Error("wrong error type:", err) + } +} + +// Check that all fields are nil. +// Clearly silly, and a residue from a more interesting test with an earlier, +// different initialization property, but it once caught a compiler bug so +// it lives. +func checkInitialized(pb *GoTest, t *testing.T) { + if pb.F_BoolDefaulted != nil { + t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) + } + if pb.F_Int32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) + } + if pb.F_Int64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) + } + if pb.F_Fixed32Defaulted != nil { + t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) + } + if pb.F_Fixed64Defaulted != nil { + t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) + } + if pb.F_Uint32Defaulted != nil { + t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) + } + if pb.F_Uint64Defaulted != nil { + t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) + } + if pb.F_FloatDefaulted != nil { + t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) + } + if pb.F_DoubleDefaulted != nil { + t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) + } + if pb.F_StringDefaulted != nil { + t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) + } + if pb.F_BytesDefaulted != nil { + t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) + } + if pb.F_Sint32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) + } + if pb.F_Sint64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) + } +} + +// Does Reset() reset? 
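TestNumericPrimitives above round-trips the Buffer's low-level varint primitives. As a sketch of the underlying wire format they exercise (base-128, least-significant group first, high bit as a continuation flag; the helper below is illustrative, not the vendored EncodeVarint):

package main

import "fmt"

// putUvarint emits x as EncodeVarint/DecodeVarint above expect: 7 bits per
// byte, low groups first, MSB set on every byte except the last.
// encoding/binary's PutUvarint produces the same bytes.
func putUvarint(x uint64) []byte {
	var out []byte
	for x >= 0x80 {
		out = append(out, byte(x)|0x80)
		x >>= 7
	}
	return append(out, byte(x))
}

func main() {
	fmt.Printf("% x\n", putUvarint(7))    // 07
	fmt.Printf("% x\n", putUvarint(3232)) // a0 19, the field-15 value after the "78" tag in the dumps below
}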
+func TestReset(t *testing.T) { + pb := initGoTest(true) + // muck with some values + pb.F_BoolDefaulted = Bool(false) + pb.F_Int32Defaulted = Int32(237) + pb.F_Int64Defaulted = Int64(12346) + pb.F_Fixed32Defaulted = Uint32(32000) + pb.F_Fixed64Defaulted = Uint64(666) + pb.F_Uint32Defaulted = Uint32(323232) + pb.F_Uint64Defaulted = nil + pb.F_FloatDefaulted = nil + pb.F_DoubleDefaulted = Float64(0) + pb.F_StringDefaulted = String("gotcha") + pb.F_BytesDefaulted = []byte("asdfasdf") + pb.F_Sint32Defaulted = Int32(123) + pb.F_Sint64Defaulted = Int64(789) + pb.Reset() + checkInitialized(pb, t) +} + +// All required fields set, no defaults provided. +func TestEncodeDecode1(t *testing.T) { + pb := initGoTest(false) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 0x20 + "714000000000000000"+ // field 14, encoding 1, value 0x40 + "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 + "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" + "b304"+ // field 70, encoding 3, start group + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // field 70, encoding 4, end group + "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f") // field 103, encoding 0, 0x7f zigzag64 +} + +// All required fields set, defaults provided. 
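Each quoted chunk in the annotated dumps above and below starts with a varint tag key, (fieldNumber << 3) | wireType. A sketch that reproduces two of them with encoding/binary (the tagKey helper is illustrative):

package main

import (
	"encoding/binary"
	"fmt"
)

// tagKey varint-encodes (fieldNumber << 3) | wireType, the key that precedes
// every field in the hex dumps.
func tagKey(field, wire uint64) []byte {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, field<<3|wire)
	return buf[:n]
}

func main() {
	fmt.Printf("% x\n", tagKey(1, 0))   // 08: the "08" in "0807", field 1, wire type 0 (varint)
	fmt.Printf("% x\n", tagKey(101, 2)) // aa 06: the "aa06" before the "bytes" payload, field 101, wire type 2
}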
+func TestEncodeDecode2(t *testing.T) { + pb := initGoTest(true) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All default fields set to their default value by hand +func TestEncodeDecode3(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolDefaulted = Bool(true) + pb.F_Int32Defaulted = Int32(32) + pb.F_Int64Defaulted = Int64(64) + pb.F_Fixed32Defaulted = Uint32(320) + pb.F_Fixed64Defaulted = Uint64(640) + pb.F_Uint32Defaulted = Uint32(3200) + pb.F_Uint64Defaulted = Uint64(6400) + pb.F_FloatDefaulted = Float32(314159) + pb.F_DoubleDefaulted = Float64(271828) + pb.F_StringDefaulted = String("hello, \"world!\"\n") + pb.F_BytesDefaulted = []byte("Bignose") + pb.F_Sint32Defaulted = Int32(-32) + pb.F_Sint64Defaulted = Int64(-64) + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all non-defaulted optional fields have values. +func TestEncodeDecode4(t *testing.T) { + pb := initGoTest(true) + pb.Table = String("hello") + pb.Param = Int32(7) + pb.OptionalField = initGoTestField() + pb.F_BoolOptional = Bool(true) + pb.F_Int32Optional = Int32(32) + pb.F_Int64Optional = Int64(64) + pb.F_Fixed32Optional = Uint32(3232) + pb.F_Fixed64Optional = Uint64(6464) + pb.F_Uint32Optional = Uint32(323232) + pb.F_Uint64Optional = Uint64(646464) + pb.F_FloatOptional = Float32(32.) + pb.F_DoubleOptional = Float64(64.) + pb.F_StringOptional = String("hello") + pb.F_BytesOptional = []byte("Bignose") + pb.F_Sint32Optional = Int32(-32) + pb.F_Sint64Optional = Int64(-64) + pb.Optionalgroup = initGoTest_OptionalGroup() + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" + "1807"+ // field 3, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "f00101"+ // field 30, encoding 0, value 1 + "f80120"+ // field 31, encoding 0, value 32 + "800240"+ // field 32, encoding 0, value 64 + "8d02a00c0000"+ // field 33, encoding 5, value 3232 + "91024019000000000000"+ // field 34, encoding 1, value 6464 + "9802a0dd13"+ // field 35, encoding 0, value 323232 + "a002c0ba27"+ // field 36, encoding 0, value 646464 + "ad0200000042"+ // field 37, encoding 5, value 32.0 + "b1020000000000005040"+ // field 38, encoding 1, value 64.0 + "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "d305"+ // start group field 90 level 1 + 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" + "d405"+ // end group field 90 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" + "f0123f"+ // field 302, encoding 0, value 63 + "f8127f"+ // field 303, encoding 0, value 127 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestEncodeDecode5(t *testing.T) { + pb := initGoTest(true) + pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} + pb.F_BoolRepeated = []bool{false, true} + pb.F_Int32Repeated = []int32{32, 33} + pb.F_Int64Repeated = []int64{64, 65} + pb.F_Fixed32Repeated = []uint32{3232, 3333} + pb.F_Fixed64Repeated = []uint64{6464, 6565} + pb.F_Uint32Repeated = []uint32{323232, 333333} + pb.F_Uint64Repeated = []uint64{646464, 656565} + pb.F_FloatRepeated = []float32{32., 33.} + pb.F_DoubleRepeated = []float64{64., 65.} + pb.F_StringRepeated = []string{"hello", "sailor"} + pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} + pb.F_Sint32Repeated = []int32{32, -32} + pb.F_Sint64Repeated = []int64{64, -64} + pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "a00100"+ // field 20, encoding 0, value 0 + "a00101"+ // field 20, encoding 0, value 1 + "a80120"+ // field 21, encoding 0, value 32 + "a80121"+ // field 21, encoding 0, value 33 + "b00140"+ // field 22, encoding 0, value 64 + "b00141"+ // field 22, encoding 0, value 65 + "bd01a00c0000"+ // field 23, encoding 5, value 3232 + "bd01050d0000"+ // field 23, encoding 5, value 3333 + "c1014019000000000000"+ // field 24, encoding 1, value 6464 + "c101a519000000000000"+ // field 24, encoding 1, value 6565 + "c801a0dd13"+ // field 25, encoding 0, value 323232 + "c80195ac14"+ // field 25, encoding 0, value 333333 + "d001c0ba27"+ // field 26, encoding 0, value 646464 + "d001b58928"+ // field 26, encoding 0, value 656565 + "dd0100000042"+ // field 27, encoding 5, value 32.0 + "dd0100000442"+ // field 27, encoding 5, value 33.0 + "e1010000000000005040"+ // field 28, encoding 1, value 64.0 + "e1010000000000405040"+ // field 28, encoding 1, value 65.0 + "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" + "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 
42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ca0c03"+"626967"+ // field 201, encoding 2, string "big" + "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" + "d00c40"+ // field 202, encoding 0, value 32 + "d00c3f"+ // field 202, encoding 0, value -32 + "d80c8001"+ // field 203, encoding 0, value 64 + "d80c7f"+ // field 203, encoding 0, value -64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, all packed repeated fields given two values. +func TestEncodeDecode6(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolRepeatedPacked = []bool{false, true} + pb.F_Int32RepeatedPacked = []int32{32, 33} + pb.F_Int64RepeatedPacked = []int64{64, 65} + pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} + pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} + pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} + pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} + pb.F_FloatRepeatedPacked = []float32{32., 33.} + pb.F_DoubleRepeatedPacked = []float64{64., 65.} + pb.F_Sint32RepeatedPacked = []int32{32, -32} + pb.F_Sint64RepeatedPacked = []int64{64, -64} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 + "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 + "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 + "aa0308"+ // field 53, encoding 2, 8 bytes + "a00c0000050d0000"+ // value 3232, value 3333 + "b20310"+ // field 54, encoding 2, 16 bytes + "4019000000000000a519000000000000"+ // value 6464, value 6565 + "ba0306"+ // field 55, encoding 2, 6 bytes + "a0dd1395ac14"+ // value 323232, value 333333 + "c20306"+ // field 56, encoding 2, 6 bytes + "c0ba27b58928"+ // value 646464, value 656565 + "ca0308"+ // field 57, 
encoding 2, 8 bytes + "0000004200000442"+ // value 32.0, value 33.0 + "d20310"+ // field 58, encoding 2, 16 bytes + "00000000000050400000000000405040"+ // value 64.0, value 65.0 + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "b21f02"+ // field 502, encoding 2, 2 bytes + "403f"+ // value 32, value -32 + "ba1f03"+ // field 503, encoding 2, 3 bytes + "80017f") // value 64, value -64 +} + +// Test that we can encode empty bytes fields. +func TestEncodeDecodeBytes1(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRequired = []byte{} + pb.F_BytesRepeated = [][]byte{{}} + pb.F_BytesOptional = []byte{} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { + t.Error("required empty bytes field is incorrect") + } + if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { + t.Error("repeated empty bytes field is incorrect") + } + if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { + t.Error("optional empty bytes field is incorrect") + } +} + +// Test that we encode nil-valued fields of a repeated bytes field correctly. +// Since entries in a repeated field cannot be nil, nil must mean empty value. +func TestEncodeDecodeBytes2(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRepeated = [][]byte{nil} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { + t.Error("Unexpected value for repeated bytes field") + } +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestSkippingUnrecognizedFields(t *testing.T) { + o := old() + pb := initGoTestField() + + // Marshal it normally. + o.Marshal(pb) + + // Now new a GoSkipTest record. + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + // Marshal it into same buffer. + o.Marshal(skip) + + pbd := new(GoTestField) + o.Unmarshal(pbd) + + // The __unrecognized field should be a marshaling of GoSkipTest + skipd := new(GoSkipTest) + + o.SetBuf(pbd.XXX_unrecognized) + o.Unmarshal(skipd) + + if *skipd.SkipInt32 != *skip.SkipInt32 { + t.Error("skip int32", skipd.SkipInt32) + } + if *skipd.SkipFixed32 != *skip.SkipFixed32 { + t.Error("skip fixed32", skipd.SkipFixed32) + } + if *skipd.SkipFixed64 != *skip.SkipFixed64 { + t.Error("skip fixed64", skipd.SkipFixed64) + } + if *skipd.SkipString != *skip.SkipString { + t.Error("skip string", *skipd.SkipString) + } + if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { + t.Error("skip group int32", skipd.Skipgroup.GroupInt32) + } + if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { + t.Error("skip group string", *skipd.Skipgroup.GroupString) + } +} + +// Check that unrecognized fields of a submessage are preserved. 
+func TestSubmessageUnrecognizedFields(t *testing.T) { + nm := &NewMessage{ + Nested: &NewMessage_Nested{ + Name: String("Nigel"), + FoodGroup: String("carbs"), + }, + } + b, err := Marshal(nm) + if err != nil { + t.Fatalf("Marshal of NewMessage: %v", err) + } + + // Unmarshal into an OldMessage. + om := new(OldMessage) + if err := Unmarshal(b, om); err != nil { + t.Fatalf("Unmarshal to OldMessage: %v", err) + } + exp := &OldMessage{ + Nested: &OldMessage_Nested{ + Name: String("Nigel"), + // normal protocol buffer users should not do this + XXX_unrecognized: []byte("\x12\x05carbs"), + }, + } + if !Equal(om, exp) { + t.Errorf("om = %v, want %v", om, exp) + } + + // Clone the OldMessage. + om = Clone(om).(*OldMessage) + if !Equal(om, exp) { + t.Errorf("Clone(om) = %v, want %v", om, exp) + } + + // Marshal the OldMessage, then unmarshal it into an empty NewMessage. + if b, err = Marshal(om); err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + t.Logf("Marshal(%v) -> %q", om, b) + nm2 := new(NewMessage) + if err := Unmarshal(b, nm2); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + if !Equal(nm, nm2) { + t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) + } +} + +// Check that we can grow an array (repeated field) to have many elements. +// This test doesn't depend only on our encoding; for variety, it makes sure +// we create, encode, and decode the correct contents explicitly. It's therefore +// a bit messier. +// This test also uses (and hence tests) the Marshal/Unmarshal functions +// instead of the methods. +func TestBigRepeated(t *testing.T) { + pb := initGoTest(true) + + // Create the arrays + const N = 50 // Internally the library starts much smaller. + pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) + pb.F_Sint64Repeated = make([]int64, N) + pb.F_Sint32Repeated = make([]int32, N) + pb.F_BytesRepeated = make([][]byte, N) + pb.F_StringRepeated = make([]string, N) + pb.F_DoubleRepeated = make([]float64, N) + pb.F_FloatRepeated = make([]float32, N) + pb.F_Uint64Repeated = make([]uint64, N) + pb.F_Uint32Repeated = make([]uint32, N) + pb.F_Fixed64Repeated = make([]uint64, N) + pb.F_Fixed32Repeated = make([]uint32, N) + pb.F_Int64Repeated = make([]int64, N) + pb.F_Int32Repeated = make([]int32, N) + pb.F_BoolRepeated = make([]bool, N) + pb.RepeatedField = make([]*GoTestField, N) + + // Fill in the arrays with checkable values. + igtf := initGoTestField() + igtrg := initGoTest_RepeatedGroup() + for i := 0; i < N; i++ { + pb.Repeatedgroup[i] = igtrg + pb.F_Sint64Repeated[i] = int64(i) + pb.F_Sint32Repeated[i] = int32(i) + s := fmt.Sprint(i) + pb.F_BytesRepeated[i] = []byte(s) + pb.F_StringRepeated[i] = s + pb.F_DoubleRepeated[i] = float64(i) + pb.F_FloatRepeated[i] = float32(i) + pb.F_Uint64Repeated[i] = uint64(i) + pb.F_Uint32Repeated[i] = uint32(i) + pb.F_Fixed64Repeated[i] = uint64(i) + pb.F_Fixed32Repeated[i] = uint32(i) + pb.F_Int64Repeated[i] = int64(i) + pb.F_Int32Repeated[i] = int32(i) + pb.F_BoolRepeated[i] = i%2 == 0 + pb.RepeatedField[i] = igtf + } + + // Marshal. + buf, _ := Marshal(pb) + + // Now test Unmarshal by recreating the original buffer. + pbd := new(GoTest) + Unmarshal(buf, pbd) + + // Check the checkable values + for i := uint64(0); i < N; i++ { + if pbd.Repeatedgroup[i] == nil { // TODO: more checking? 
+ t.Error("pbd.Repeatedgroup bad") + } + var x uint64 + x = uint64(pbd.F_Sint64Repeated[i]) + if x != i { + t.Error("pbd.F_Sint64Repeated bad", x, i) + } + x = uint64(pbd.F_Sint32Repeated[i]) + if x != i { + t.Error("pbd.F_Sint32Repeated bad", x, i) + } + s := fmt.Sprint(i) + equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) + if pbd.F_StringRepeated[i] != s { + t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) + } + x = uint64(pbd.F_DoubleRepeated[i]) + if x != i { + t.Error("pbd.F_DoubleRepeated bad", x, i) + } + x = uint64(pbd.F_FloatRepeated[i]) + if x != i { + t.Error("pbd.F_FloatRepeated bad", x, i) + } + x = pbd.F_Uint64Repeated[i] + if x != i { + t.Error("pbd.F_Uint64Repeated bad", x, i) + } + x = uint64(pbd.F_Uint32Repeated[i]) + if x != i { + t.Error("pbd.F_Uint32Repeated bad", x, i) + } + x = pbd.F_Fixed64Repeated[i] + if x != i { + t.Error("pbd.F_Fixed64Repeated bad", x, i) + } + x = uint64(pbd.F_Fixed32Repeated[i]) + if x != i { + t.Error("pbd.F_Fixed32Repeated bad", x, i) + } + x = uint64(pbd.F_Int64Repeated[i]) + if x != i { + t.Error("pbd.F_Int64Repeated bad", x, i) + } + x = uint64(pbd.F_Int32Repeated[i]) + if x != i { + t.Error("pbd.F_Int32Repeated bad", x, i) + } + if pbd.F_BoolRepeated[i] != (i%2 == 0) { + t.Error("pbd.F_BoolRepeated bad", x, i) + } + if pbd.RepeatedField[i] == nil { // TODO: more checking? + t.Error("pbd.RepeatedField bad") + } + } +} + +// Verify we give a useful message when decoding to the wrong structure type. +func TestTypeMismatch(t *testing.T) { + pb1 := initGoTest(true) + + // Marshal + o := old() + o.Marshal(pb1) + + // Now Unmarshal it to the wrong type. + pb2 := initGoTestField() + err := o.Unmarshal(pb2) + if err == nil { + t.Error("expected error, got no error") + } else if !strings.Contains(err.Error(), "bad wiretype") { + t.Error("expected bad wiretype error, got", err) + } +} + +func encodeDecode(t *testing.T, in, out Message, msg string) { + buf, err := Marshal(in) + if err != nil { + t.Fatalf("failed marshaling %v: %v", msg, err) + } + if err := Unmarshal(buf, out); err != nil { + t.Fatalf("failed unmarshaling %v: %v", msg, err) + } +} + +func TestPackedNonPackedDecoderSwitching(t *testing.T) { + np, p := new(NonPackedTest), new(PackedTest) + + // non-packed -> packed + np.A = []int32{0, 1, 1, 2, 3, 5} + encodeDecode(t, np, p, "non-packed -> packed") + if !reflect.DeepEqual(np.A, p.B) { + t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) + } + + // packed -> non-packed + np.Reset() + p.B = []int32{3, 1, 4, 1, 5, 9} + encodeDecode(t, p, np, "packed -> non-packed") + if !reflect.DeepEqual(p.B, np.A) { + t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) + } +} + +func TestProto1RepeatedGroup(t *testing.T) { + pb := &MessageList{ + Message: []*MessageList_Message{ + { + Name: String("blah"), + Count: Int32(7), + }, + // NOTE: pb.Message[1] is a nil + nil, + }, + } + + o := old() + if err := o.Marshal(pb); err != ErrRepeatedHasNil { + t.Fatalf("unexpected or no error when marshaling: %v", err) + } +} + +// Test that enums work. Checks for a bug introduced by making enums +// named types instead of int32: newInt32FromUint64 would crash with +// a type mismatch in reflect.PointTo. 
+func TestEnum(t *testing.T) { + pb := new(GoEnum) + pb.Foo = FOO_FOO1.Enum() + o := old() + if err := o.Marshal(pb); err != nil { + t.Fatal("error encoding enum:", err) + } + pb1 := new(GoEnum) + if err := o.Unmarshal(pb1); err != nil { + t.Fatal("error decoding enum:", err) + } + if *pb1.Foo != FOO_FOO1 { + t.Error("expected 7 but got ", *pb1.Foo) + } +} + +// Enum types have String methods. Check that enum fields can be printed. +// We don't care what the value actually is, just as long as it doesn't crash. +func TestPrintingNilEnumFields(t *testing.T) { + pb := new(GoEnum) + fmt.Sprintf("%+v", pb) +} + +// Verify that absent required fields cause Marshal/Unmarshal to return errors. +func TestRequiredFieldEnforcement(t *testing.T) { + pb := new(GoTestField) + _, err := Marshal(pb) + if err == nil { + t.Error("marshal: expected error, got nil") + } else if strings.Index(err.Error(), "Label") < 0 { + t.Errorf("marshal: bad error type: %v", err) + } + + // A slightly sneaky, yet valid, proto. It encodes the same required field twice, + // so simply counting the required fields is insufficient. + // field 1, encoding 2, value "hi" + buf := []byte("\x0A\x02hi\x0A\x02hi") + err = Unmarshal(buf, pb) + if err == nil { + t.Error("unmarshal: expected error, got nil") + } else if strings.Index(err.Error(), "{Unknown}") < 0 { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +func TestTypedNilMarshal(t *testing.T) { + // A typed nil should return ErrNil and not crash. + _, err := Marshal((*GoEnum)(nil)) + if err != ErrNil { + t.Errorf("Marshal: got err %v, want ErrNil", err) + } +} + +// A type that implements the Marshaler interface, but is not nillable. +type nonNillableInt uint64 + +func (nni nonNillableInt) Marshal() ([]byte, error) { + return EncodeVarint(uint64(nni)), nil +} + +type NNIMessage struct { + nni nonNillableInt +} + +func (*NNIMessage) Reset() {} +func (*NNIMessage) String() string { return "" } +func (*NNIMessage) ProtoMessage() {} + +// A type that implements the Marshaler interface and is nillable. +type nillableMessage struct { + x uint64 +} + +func (nm *nillableMessage) Marshal() ([]byte, error) { + return EncodeVarint(nm.x), nil +} + +type NMMessage struct { + nm *nillableMessage +} + +func (*NMMessage) Reset() {} +func (*NMMessage) String() string { return "" } +func (*NMMessage) ProtoMessage() {} + +// Verify a type that uses the Marshaler interface, but has a nil pointer. +func TestNilMarshaler(t *testing.T) { + // Try a struct with a Marshaler field that is nil. + // It should be directly marshable. + nmm := new(NMMessage) + if _, err := Marshal(nmm); err != nil { + t.Error("unexpected error marshaling nmm: ", err) + } + + // Try a struct with a Marshaler field that is not nillable. + nnim := new(NNIMessage) + nnim.nni = 7 + var _ Marshaler = nnim.nni // verify it is truly a Marshaler + if _, err := Marshal(nnim); err != nil { + t.Error("unexpected error marshaling nnim: ", err) + } +} + +func TestAllSetDefaults(t *testing.T) { + // Exercise SetDefaults with all scalar field types. + m := &Defaults{ + // NaN != NaN, so override that here. 
+ F_Nan: Float32(1.7), + } + expected := &Defaults{ + F_Bool: Bool(true), + F_Int32: Int32(32), + F_Int64: Int64(64), + F_Fixed32: Uint32(320), + F_Fixed64: Uint64(640), + F_Uint32: Uint32(3200), + F_Uint64: Uint64(6400), + F_Float: Float32(314159), + F_Double: Float64(271828), + F_String: String(`hello, "world!"` + "\n"), + F_Bytes: []byte("Bignose"), + F_Sint32: Int32(-32), + F_Sint64: Int64(-64), + F_Enum: Defaults_GREEN.Enum(), + F_Pinf: Float32(float32(math.Inf(1))), + F_Ninf: Float32(float32(math.Inf(-1))), + F_Nan: Float32(1.7), + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf(" got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithSetField(t *testing.T) { + // Check that a set value is not overridden. + m := &Defaults{ + F_Int32: Int32(12), + } + SetDefaults(m) + if v := m.GetF_Int32(); v != 12 { + t.Errorf("m.FInt32 = %v, want 12", v) + } +} + +func TestSetDefaultsWithSubMessage(t *testing.T) { + m := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + }, + } + expected := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + Port: Int32(4000), + }, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { + m := &MyMessage{ + RepInner: []*InnerMessage{{}}, + } + expected := &MyMessage{ + RepInner: []*InnerMessage{{ + Port: Int32(4000), + }}, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestMaximumTagNumber(t *testing.T) { + m := &MaxTag{ + LastField: String("natural goat essence"), + } + buf, err := Marshal(m) + if err != nil { + t.Fatalf("proto.Marshal failed: %v", err) + } + m2 := new(MaxTag) + if err := Unmarshal(buf, m2); err != nil { + t.Fatalf("proto.Unmarshal failed: %v", err) + } + if got, want := m2.GetLastField(), *m.LastField; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestJSON(t *testing.T) { + m := &MyMessage{ + Count: Int32(4), + Pet: []string{"bunny", "kitty"}, + Inner: &InnerMessage{ + Host: String("cauchy"), + }, + Bikeshed: MyMessage_GREEN.Enum(), + } + const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` + + b, err := json.Marshal(m) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + s := string(b) + if s != expected { + t.Errorf("got %s\nwant %s", s, expected) + } + + received := new(MyMessage) + if err := json.Unmarshal(b, received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } + + // Test unmarshalling of JSON with symbolic enum name. + const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` + received.Reset() + if err := json.Unmarshal([]byte(old), received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } +} + +func TestBadWireType(t *testing.T) { + b := []byte{7<<3 | 6} // field 7, wire type 6 + pb := new(OtherMessage) + if err := Unmarshal(b, pb); err == nil { + t.Errorf("Unmarshal did not fail") + } else if !strings.Contains(err.Error(), "unknown wire type") { + t.Errorf("wrong error: %v", err) + } +} + +func TestBytesWithInvalidLength(t *testing.T) { + // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. 
+ b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} + Unmarshal(b, new(MyMessage)) +} + +func TestLengthOverflow(t *testing.T) { + // Overflowing a length should not panic. + b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} + Unmarshal(b, new(MyMessage)) +} + +func TestVarintOverflow(t *testing.T) { + // Overflowing a 64-bit length should not be allowed. + b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} + if err := Unmarshal(b, new(MyMessage)); err == nil { + t.Fatalf("Overflowed uint64 length without error") + } +} + +func TestUnmarshalFuzz(t *testing.T) { + const N = 1000 + seed := time.Now().UnixNano() + t.Logf("RNG seed is %d", seed) + rng := rand.New(rand.NewSource(seed)) + buf := make([]byte, 20) + for i := 0; i < N; i++ { + for j := range buf { + buf[j] = byte(rng.Intn(256)) + } + fuzzUnmarshal(t, buf) + } +} + +func TestMergeMessages(t *testing.T) { + pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} + data, err := Marshal(pb) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + pb1 := new(MessageList) + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("first Unmarshal: %v", err) + } + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("second Unmarshal: %v", err) + } + if len(pb1.Message) != 1 { + t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) + } + + pb2 := new(MessageList) + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("first UnmarshalMerge: %v", err) + } + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("second UnmarshalMerge: %v", err) + } + if len(pb2.Message) != 2 { + t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) + } +} + +func TestExtensionMarshalOrder(t *testing.T) { + m := &MyMessage{Count: Int(123)} + if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { + t.Fatalf("SetExtension: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. + var orig []byte + for i := 0; i < 100; i++ { + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if i == 0 { + orig = b + continue + } + if !bytes.Equal(b, orig) { + t.Errorf("Bytes differ on attempt #%d", i) + } + } +} + +// Many extensions, because small maps might not iterate differently on each iteration. 
+var exts = []*ExtensionDesc{ + E_X201, + E_X202, + E_X203, + E_X204, + E_X205, + E_X206, + E_X207, + E_X208, + E_X209, + E_X210, + E_X211, + E_X212, + E_X213, + E_X214, + E_X215, + E_X216, + E_X217, + E_X218, + E_X219, + E_X220, + E_X221, + E_X222, + E_X223, + E_X224, + E_X225, + E_X226, + E_X227, + E_X228, + E_X229, + E_X230, + E_X231, + E_X232, + E_X233, + E_X234, + E_X235, + E_X236, + E_X237, + E_X238, + E_X239, + E_X240, + E_X241, + E_X242, + E_X243, + E_X244, + E_X245, + E_X246, + E_X247, + E_X248, + E_X249, + E_X250, +} + +func TestMessageSetMarshalOrder(t *testing.T) { + m := &MyMessageSet{} + for _, x := range exts { + if err := SetExtension(m, x, &Empty{}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + } + + buf, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. + for i := 0; i < 10; i++ { + b1, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(b1, buf) { + t.Errorf("Bytes differ on re-Marshal #%d", i) + } + + m2 := &MyMessageSet{} + if err := Unmarshal(buf, m2); err != nil { + t.Errorf("Unmarshal: %v", err) + } + b2, err := Marshal(m2) + if err != nil { + t.Errorf("re-Marshal: %v", err) + } + if !bytes.Equal(b2, buf) { + t.Errorf("Bytes differ on round-trip #%d", i) + } + } +} + +func TestUnmarshalMergesMessages(t *testing.T) { + // If a nested message occurs twice in the input, + // the fields should be merged when decoding. + a := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("polhode"), + Port: Int32(1234), + }, + } + aData, err := Marshal(a) + if err != nil { + t.Fatalf("Marshal(a): %v", err) + } + b := &OtherMessage{ + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Connected: Bool(true), + }, + } + bData, err := Marshal(b) + if err != nil { + t.Fatalf("Marshal(b): %v", err) + } + want := &OtherMessage{ + Key: Int64(123), + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Port: Int32(1234), + Connected: Bool(true), + }, + } + got := new(OtherMessage) + if err := Unmarshal(append(aData, bData...), got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !Equal(got, want) { + t.Errorf("\n got %v\nwant %v", got, want) + } +} + +func TestEncodingSizes(t *testing.T) { + tests := []struct { + m Message + n int + }{ + {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, + {&Defaults{F_Int32: Int32(math.MinInt32)}, 6}, + {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, + } + for _, test := range tests { + b, err := Marshal(test.m) + if err != nil { + t.Errorf("Marshal(%v): %v", test.m, err) + continue + } + if len(b) != test.n { + t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) + } + } +} + +func TestRequiredNotSetError(t *testing.T) { + pb := initGoTest(false) + pb.RequiredField.Label = nil + pb.F_Int32Required = nil + pb.F_Int64Required = nil + + expected := "0807" + // field 1, encoding 0, value 7 + "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) + "5001" + // field 10, encoding 0, value 1 + "6d20000000" + // field 13, encoding 5, value 0x20 + "714000000000000000" + // field 14, encoding 1, value 0x40 + "78a019" + // field 15, encoding 0, value 0xca0 = 3232 + "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45" + // field 17, encoding 5, value 3232.0 + "9101000000000040b940" + // field 18, encoding 1, value 6464.0 + "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" 
+ "b304" + // field 70, encoding 3, start group + "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" + "b404" + // field 70, encoding 4, end group + "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" + "b0063f" + // field 102, encoding 0, 0x3f zigzag32 + "b8067f" // field 103, encoding 0, 0x7f zigzag64 + + o := old() + bytes, err := Marshal(pb) + if _, ok := err.(*RequiredNotSetError); !ok { + fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("expected = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-1 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 1", bytes) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. + pbd := new(GoTest) + err = Unmarshal(bytes, pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { + t.Errorf("unmarshal wrong err msg: %v", err) + } + bytes, err = Marshal(pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-2 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 2", bytes) + t.Fatalf("string = %s", expected) + } +} + +func fuzzUnmarshal(t *testing.T, data []byte) { + defer func() { + if e := recover(); e != nil { + t.Errorf("These bytes caused a panic: %+v", data) + t.Logf("Stack:\n%s", debug.Stack()) + t.FailNow() + } + }() + + pb := new(MyMessage) + Unmarshal(data, pb) +} + +// Benchmarks + +func testMsg() *GoTest { + pb := initGoTest(true) + const N = 1000 // Internally the library starts much smaller. 
+ pb.F_Int32Repeated = make([]int32, N) + pb.F_DoubleRepeated = make([]float64, N) + for i := 0; i < N; i++ { + pb.F_Int32Repeated[i] = int32(i) + pb.F_DoubleRepeated[i] = float64(i) + } + return pb +} + +func bytesMsg() *GoTest { + pb := initGoTest(true) + buf := make([]byte, 4000) + for i := range buf { + buf[i] = byte(i) + } + pb.F_BytesDefaulted = buf + return pb +} + +func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { + d, _ := marshal(pb) + b.SetBytes(int64(len(d))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + marshal(pb) + } +} + +func benchmarkBufferMarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + p.Reset() + err := p.Marshal(pb0) + return p.Bytes(), err + }) +} + +func benchmarkSize(b *testing.B, pb Message) { + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + Size(pb) + return nil, nil + }) +} + +func newOf(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + return reflect.New(in.Type().Elem()).Interface().(Message) +} + +func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { + d, _ := Marshal(pb) + b.SetBytes(int64(len(d))) + pbd := newOf(pb) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + unmarshal(d, pbd) + } +} + +func benchmarkBufferUnmarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { + p.SetBuf(d) + return p.Unmarshal(pb0) + }) +} + +// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} + +func BenchmarkMarshal(b *testing.B) { + benchmarkMarshal(b, testMsg(), Marshal) +} + +func BenchmarkBufferMarshal(b *testing.B) { + benchmarkBufferMarshal(b, testMsg()) +} + +func BenchmarkSize(b *testing.B) { + benchmarkSize(b, testMsg()) +} + +func BenchmarkUnmarshal(b *testing.B) { + benchmarkUnmarshal(b, testMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshal(b *testing.B) { + benchmarkBufferUnmarshal(b, testMsg()) +} + +func BenchmarkMarshalBytes(b *testing.B) { + benchmarkMarshal(b, bytesMsg(), Marshal) +} + +func BenchmarkBufferMarshalBytes(b *testing.B) { + benchmarkBufferMarshal(b, bytesMsg()) +} + +func BenchmarkSizeBytes(b *testing.B) { + benchmarkSize(b, bytesMsg()) +} + +func BenchmarkUnmarshalBytes(b *testing.B) { + benchmarkUnmarshal(b, bytesMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshalBytes(b *testing.B) { + benchmarkBufferUnmarshal(b, bytesMsg()) +} + +func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { + b.StopTimer() + pb := initGoTestField() + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + pbd := new(GoTestField) + p := NewBuffer(nil) + p.Marshal(pb) + p.Marshal(skip) + p2 := NewBuffer(nil) + + b.StartTimer() + for i := 0; i < b.N; i++ { + p2.SetBuf(p.Bytes()) + p2.Unmarshal(pbd) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/clone.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/clone.go new file mode 100644 index 000000000..b4b5c0588 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/clone.go @@ -0,0 +1,174 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. 
+// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy. +// TODO: MessageSet and RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i)) + } + + if emIn, ok := in.Addr().Interface().(extensionsMap); ok { + emOut := out.Addr().Interface().(extensionsMap) + mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) + } else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) 
+ } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +func mergeAny(out, in reflect.Value) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(in) + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem()) + case reflect.Slice: + if in.IsNil() { + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + case reflect.Uint8: + // []byte is a scalar bytes field. + out.Set(in) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i)) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value)) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/clone_test.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/clone_test.go new file mode 100644 index 000000000..3802c6633 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/clone_test.go @@ -0,0 +1,186 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" + + pb "./testdata" +) + +var cloneTestMessage = &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, +} + +func init() { + ext := &pb.Ext{ + Data: proto.String("extension"), + } + if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { + panic("SetExtension: " + err.Error()) + } +} + +func TestClone(t *testing.T) { + m := proto.Clone(cloneTestMessage).(*pb.MyMessage) + if !proto.Equal(m, cloneTestMessage) { + t.Errorf("Clone(%v) = %v", cloneTestMessage, m) + } + + // Verify it was a deep copy. + *m.Inner.Port++ + if proto.Equal(m, cloneTestMessage) { + t.Error("Mutating clone changed the original") + } +} + +func TestCloneNil(t *testing.T) { + var m *pb.MyMessage + if c := proto.Clone(m); !proto.Equal(m, c) { + t.Errorf("Clone(%v) = %v", m, c) + } +} + +var mergeTests = []struct { + src, dst, want proto.Message +}{ + { + src: &pb.MyMessage{ + Count: proto.Int32(42), + }, + dst: &pb.MyMessage{ + Name: proto.String("Dave"), + }, + want: &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + }, + }, + { + src: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + }, + Pet: []string{"horsey"}, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + }, + dst: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + { + // Explicitly test a src=nil field + Inner: nil, + }, + }, + }, + want: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty", "horsey"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + {}, + { + Value: []byte("some bytes"), + }, + }, + }, + }, + { + src: &pb.MyMessage{ + RepBytes: [][]byte{[]byte("wow")}, + }, + dst: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham")}, + }, + want: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, + }, + }, + // Check that a scalar bytes field replaces rather than appends. 
+ { + src: &pb.OtherMessage{Value: []byte("foo")}, + dst: &pb.OtherMessage{Value: []byte("bar")}, + want: &pb.OtherMessage{Value: []byte("foo")}, + }, +} + +func TestMerge(t *testing.T) { + for _, m := range mergeTests { + got := proto.Clone(m.dst) + proto.Merge(got, m.src) + if !proto.Equal(got, m.want) { + t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/decode.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/decode.go new file mode 100644 index 000000000..9714104a1 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/decode.go @@ -0,0 +1,726 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + // x, n already 0 + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. 
+// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + // x, err already 0 + + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. 
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d", st, tag) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + if ee, ok := e.(extensionsMap); ok { + ext := ee.ExtensionMap()[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + ee.ExtensionMap()[int32(tag)] = ext + } else if ee, ok := e.(extensionsBytes); ok { + ext := ee.GetExtensions() + *ext = append(*ext, o.buf[oi:o.index]...) + } + } + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +// Decode an int32. 
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + sp := new(string) + *sp = s + *structPointer_String(base, p.field) = sp + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + + y := *v + for i := 0; i < nb; i++ { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. +func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). 
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/decode_gogo.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/decode_gogo.go new file mode 100644 index 000000000..1161dbd54 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/decode_gogo.go @@ -0,0 +1,220 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://code.google.com/p/gogoprotobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +// Decode a reference to a bool pointer. +func (o *Buffer) dec_ref_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_RefBool(base, p.field) = o.bools[0] + o.bools = o.bools[1:] + return nil +} + +// Decode a reference to an int32 pointer. +func (o *Buffer) dec_ref_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + refWord32_Set(structPointer_RefWord32(base, p.field), o, uint32(u)) + return nil +} + +// Decode a reference to an int64 pointer. +func (o *Buffer) dec_ref_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + refWord64_Set(structPointer_RefWord64(base, p.field), o, u) + return nil +} + +// Decode a reference to a string pointer. +func (o *Buffer) dec_ref_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_RefString(base, p.field) = s + return nil +} + +// Decode a reference to a struct pointer. +func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is a pointer receiver") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + bas := structPointer_FieldPointer(base, p.field) + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers ([]struct). +func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { + newBas := appendStructPointer(base, p.field, p.sstype) + + if is_group { + panic("not supported, maybe in future, if requested.") + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is not a pointer receiver.") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) + + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers. 
+func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_ref_struct(p, false, base) +} + +func setPtrCustomType(base structPointer, f field, v interface{}) { + if v == nil { + return + } + structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer())) +} + +func setCustomType(base structPointer, f field, value interface{}) { + if value == nil { + return + } + v := reflect.ValueOf(value).Elem() + t := reflect.TypeOf(value).Elem() + kind := t.Kind() + switch kind { + case reflect.Slice: + slice := reflect.MakeSlice(t, v.Len(), v.Cap()) + reflect.Copy(slice, v) + oldHeader := structPointer_GetSliceHeader(base, f) + oldHeader.Data = slice.Pointer() + oldHeader.Len = v.Len() + oldHeader.Cap = v.Cap() + default: + l := 1 + size := reflect.TypeOf(value).Elem().Size() + if kind == reflect.Array { + l = reflect.TypeOf(value).Elem().Len() + size = reflect.TypeOf(value).Size() + } + total := int(size) * l + structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), total) + } +} + +func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + setPtrCustomType(base, p.field, custom) + return nil +} + +func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + if custom != nil { + setCustomType(base, p.field, custom) + } + return nil +} + +// Decode a slice of bytes ([]byte) into a slice of custom types. +func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + newBas := appendStructPointer(base, p.field, p.ctype) + + setCustomType(newBas, 0, custom) + + return nil +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/encode.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/encode.go new file mode 100644 index 000000000..2d3e03f69 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/encode.go @@ -0,0 +1,961 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // ErrRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + ErrRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. 
+// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + var state errorState + if err != nil && !state.shouldContinue(err, nil) { + return nil, err + } + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + if err != nil { + return err + } + p.buf = append(p.buf, data...) + return nil + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(t.Elem(), GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Encode++ + } + + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. 
+ if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(t.Elem(), GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Size++ + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.stype, p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? 
+ if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.stype, p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.stype, p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.stype, p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return ErrRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.stype, p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return ErrRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.stype, p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return ErrRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.stype, p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return ErrRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.stype, p.sprop, b) + } + return +} + +// Encode an extension map. +func (o *Buffer) enc_map(p *Properties, base structPointer) error { + v := *structPointer_ExtMap(base, p.field) + if err := encodeExtensionMap(v); err != nil { + return err + } + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) 
+ } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := *structPointer_ExtMap(base, p.field) + return sizeExtensionMap(v) +} + +// Encode a struct. +func (o *Buffer) enc_struct(t reflect.Type, prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // http://code.google.com/apis/protocolbuffers/docs/encoding.html#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if !state.shouldContinue(err, p) { + return err + } + } + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(t reflect.Type, prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(t reflect.Type, prop *StructProperties, base structPointer, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := o.enc_struct(t, prop, base) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." 
+ reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/encode_gogo.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/encode_gogo.go new file mode 100644 index 000000000..d5d7017aa --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/encode_gogo.go @@ -0,0 +1,361 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://code.google.com/p/gogoprotobuf/gogoproto +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +type Sizer interface { + Size() int +} + +func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, s...) + return nil +} + +func size_ext_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return 0 + } + n += len(s) + return +} + +// Encode a reference to bool pointer. +func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { + v := structPointer_RefBool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_bool(p *Properties, base structPointer) int { + v := structPointer_RefBool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode a reference to int32 pointer. +func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { + v := structPointer_RefWord32(base, p.field) + if refWord32_IsNil(v) { + return ErrNil + } + x := refWord32_Get(v) + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_int32(p *Properties, base structPointer) (n int) { + v := structPointer_RefWord32(base, p.field) + if refWord32_IsNil(v) { + return 0 + } + x := refWord32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a reference to an int64 pointer. +func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { + v := structPointer_RefWord64(base, p.field) + if refWord64_IsNil(v) { + return ErrNil + } + x := refWord64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_ref_int64(p *Properties, base structPointer) (n int) { + v := structPointer_RefWord64(base, p.field) + if refWord64_IsNil(v) { + return 0 + } + x := refWord64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a reference to a string pointer. +func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { + v := structPointer_RefString(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func size_ref_string(p *Properties, base structPointer) (n int) { + v := structPointer_RefString(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +// Encode a reference to a message struct. +func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.stype, p.sprop, structp, &state) +} + +//TODO this is only copied, please fix this +func size_ref_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.stype, p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a slice of references to message struct pointers ([]struct). +func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + ss := structPointer_GetStructPointer(base, p.field) + ss1 := structPointer_GetRefStructPointer(ss, field(0)) + size := p.stype.Size() + l := structPointer_Len(base, p.field) + for i := 0; i < l; i++ { + structp := structPointer_Add(ss1, field(uintptr(i)*size)) + if structPointer_IsNil(structp) { + return ErrRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) 
+ err := o.enc_len_struct(p.stype, p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return ErrRepeatedHasNil + } + return err + } + + } + return state.err +} + +//TODO this is only copied, please fix this +func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { + ss := structPointer_GetStructPointer(base, p.field) + ss1 := structPointer_GetRefStructPointer(ss, field(0)) + size := p.stype.Size() + l := structPointer_Len(base, p.field) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := structPointer_Add(ss1, field(uintptr(i)*size)) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.stype, p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return ErrNil + } + custom := i.(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_custom_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { + custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceAt(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return ErrNil + } + slice := reflect.ValueOf(inter) + l := slice.Len() + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + } + return nil +} + +func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return 0 + } + slice := reflect.ValueOf(inter) + l := slice.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + } + return +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/equal.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/equal.go new file mode 100644 index 000000000..42542e692 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/equal.go @@ -0,0 +1,241 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. +// TODO: MessageSet. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal (a "bytes" field, + although represented by []byte, is not a repeated field) + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. (TODO) + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Every other combination of things are not equal. 
+ +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +func equalAny(v1, v2 reflect.Value) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Ptr: + return equalAny(v1.Elem(), v2.Elem()) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i)) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// em1 and em2 are extension maps. +func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. 
+ var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + } + + return true +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/equal_test.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/equal_test.go new file mode 100644 index 000000000..c9e9ad708 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/equal_test.go @@ -0,0 +1,166 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + pb "./testdata" + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" +) + +// Four identical base messages. +// The init function adds extensions to some of them. +var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} + +// Two messages with non-message extensions. +var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} +var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)} + +func init() { + ext1 := &pb.Ext{Data: String("Kirk")} + ext2 := &pb.Ext{Data: String("Picard")} + + // messageWithExtension1a has ext1, but never marshals it. 
+ if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1a failed: " + err.Error()) + } + + // messageWithExtension1b is the unmarshaled form of messageWithExtension1a. + if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1b failed: " + err.Error()) + } + buf, err := Marshal(messageWithExtension1b) + if err != nil { + panic("Marshal of 1b failed: " + err.Error()) + } + messageWithExtension1b.Reset() + if err := Unmarshal(buf, messageWithExtension1b); err != nil { + panic("Unmarshal of 1b failed: " + err.Error()) + } + + // messageWithExtension2 has ext2. + if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil { + panic("SetExtension on 2 failed: " + err.Error()) + } + + if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil { + panic("SetExtension on Int32-1 failed: " + err.Error()) + } + if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil { + panic("SetExtension on Int32-2 failed: " + err.Error()) + } +} + +var EqualTests = []struct { + desc string + a, b Message + exp bool +}{ + {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false}, + {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true}, + {"nil vs nil", nil, nil, true}, + {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true}, + {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false}, + {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false}, + + {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false}, + {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false}, + {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false}, + {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true}, + + {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false}, + {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false}, + {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false}, + {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true}, + {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true}, + {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true}, + {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true}, + + { + "nested, different", + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}}, + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}}, + false, + }, + { + "nested, equal", + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, + true, + }, + + {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true}, + {"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true}, + {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false}, + { + "repeated bytes", + &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, + &pb.MyMessage{RepBytes: 
[][]byte{[]byte("sham"), []byte("wow")}}, + true, + }, + + {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false}, + {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true}, + {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false}, + + {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true}, + {"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, + + { + "message with group", + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + true, + }, +} + +func TestEqual(t *testing.T) { + for _, tc := range EqualTests { + if res := Equal(tc.a, tc.b); res != tc.exp { + t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/extensions.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/extensions.go new file mode 100644 index 000000000..3749958ba --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/extensions.go @@ -0,0 +1,460 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. 
+type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange +} + +type extensionsMap interface { + extendableProto + ExtensionMap() map[int32]Extension +} + +type extensionsBytes interface { + extendableProto + GetExtensions() *[]byte +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base extendableProto, id int32, b []byte) { + if ebase, ok := base.(extensionsMap); ok { + ebase.ExtensionMap()[id] = Extension{enc: b} + } else if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + } else { + panic("unreachable") + } +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + // Check the extended type. + if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. 
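+	// Another goroutine may have built and stored the Properties between the
+	// RUnlock and Lock calls above, so re-check under the write lock before
+	// constructing a new entry.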
+ if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. +func encodeExtensionMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func sizeExtensionMap(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + if epb, doki := pb.(extensionsMap); doki { + _, ok := epb.ExtensionMap()[extension.Field] + return ok + } else if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + panic("unreachable") +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} + +func clearExtension(pb extendableProto, fieldNum int32) { + if epb, doki := pb.(extensionsMap); doki { + delete(epb.ExtensionMap(), fieldNum) + } else if epb, doki := pb.(extensionsBytes); doki { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + } else { + panic("unreachable") + } +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb extendableProto, extension *ExtensionDesc) { + // TODO: Check types, field numbers, etc.? 
+ clearExtension(pb, extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present it returns ErrMissingExtension. +func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { + if err := checkExtensionTypes(pb, extension); err != nil { + return nil, err + } + + if epb, doki := pb.(extensionsMap); doki { + e, ok := epb.ExtensionMap()[extension.Field] + if !ok { + return nil, ErrMissingExtension + } + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + return e.value, nil + } else if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + o := 0 + for o < len(*ext) { + tag, n := DecodeVarint((*ext)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size((*ext)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + v, err := decodeExtension((*ext)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + } + panic("unreachable") +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + rep := extension.repeated() + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if !rep || o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := pb.(extendableProto) + if !ok { + err = errors.New("proto: not an extendable proto") + return + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// SetExtension sets the specified extension of pb to the specified value. 
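+//
+// A minimal usage sketch, borrowing the message type pb.MyMessage, the extension
+// message pb.Ext and the descriptor pb.E_Ext_More from this package's testdata
+// (the same names the package tests use):
+//
+//	msg := &pb.MyMessage{Count: proto.Int32(4)}
+//	if err := proto.SetExtension(msg, pb.E_Ext_More, &pb.Ext{}); err != nil {
+//		// the descriptor does not extend pb.MyMessage, or the value has the wrong type
+//	}
+//	v, err := proto.GetExtension(msg, pb.E_Ext_More) // v holds a *pb.Ext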
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { + if err := checkExtensionTypes(pb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + + if epb, doki := pb.(extensionsMap); doki { + epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} + } else if epb, doki := pb.(extensionsBytes); doki { + ClearExtension(pb, extension) + ext := epb.GetExtensions() + et := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + p := NewBuffer(nil) + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + *ext = append(*ext, p.buf...) + } + return nil +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/extensions_gogo.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/extensions_gogo.go new file mode 100644 index 000000000..8f7eb8264 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/extensions_gogo.go @@ -0,0 +1,189 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://code.google.com/p/gogoprotobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strings" +) + +func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + return bytes.Equal(this.enc, that.enc) +} + +func SizeOfExtensionMap(m map[int32]Extension) (n int) { + return sizeExtensionMap(m) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + if err := encodeExtensionMap(m); err != nil { + return 0, err + } + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + for _, k := range keys { + n += copy(data[n:], m[int32(k)].enc) + } + return n, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + if m[id].value == nil || m[id].desc == nil { + return m[id].enc, nil + } + if err := encodeExtensionMap(m); err != nil { + return nil, err + } + return m[id].enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) 
(map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func (this Extension) GoString() string { + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/extensions_test.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/extensions_test.go new file mode 100644 index 000000000..55ba73250 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/extensions_test.go @@ -0,0 +1,60 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto_test + +import ( + "testing" + + pb "./testdata" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" +) + +func TestGetExtensionsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", ext1) + } + exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ + pb.E_Ext_More, + pb.E_Ext_Text, + }) + if err != nil { + t.Fatalf("GetExtensions() failed: %s", err) + } + if exts[0] != ext1 { + t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) + } + if exts[1] != nil { + t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/lib.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/lib.go new file mode 100644 index 000000000..e70c0fddc --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/lib.go @@ -0,0 +1,740 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* + Package proto converts data structures to and from the wire format of + protocol buffers. It works in concert with the Go source code generated + for .proto files by the protocol compiler. + + A summary of the properties of the protocol buffer interface + for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. 
+ - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + Helpers for getting values are superseded by the + GetFoo methods and their use is deprecated. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed with the enum's type name. Enum types have + a String method, and a Enum method to assist in message construction. + - Nested groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Marshal and Unmarshal are functions to encode and decode the wire format. + + The simplest way to describe this is to see an example. + Given file test.proto, containing + + package example; + + enum FOO { X = 17; }; + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + } + + The resulting file, test.pb.go, is: + + package example + + import "code.google.com/p/gogoprotobuf/proto" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` + } + func (this *Test) Reset() { *this = Test{} } + func (this *Test) String() string { return proto.CompactTextString(this) } + const Default_Test_Type int32 = 77 + + func (this *Test) GetLabel() string { + if this != nil && this.Label != nil { + return *this.Label + } + return "" + } + + func (this *Test) GetType() int32 { + if this != nil && this.Type != nil { + return *this.Type + } + return Default_Test_Type + } + + func (this *Test) GetOptionalgroup() *Test_OptionalGroup { + if this != nil { + return this.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` + } + func (this *Test_OptionalGroup) Reset() { *this = Test_OptionalGroup{} } + func (this *Test_OptionalGroup) String() string { return proto.CompactTextString(this) } + + func (this *Test_OptionalGroup) GetRequiredField() string { + if this != nil && this.RequiredField != nil { + return *this.RequiredField + } + return "" + } + + func init() { + 
proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + + To create and play with a Test object: + + package main + + import ( + "log" + + "code.google.com/p/gogoprotobuf/proto" + "./example.pb" + ) + + func main() { + test := &example.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Optionalgroup: &example.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := new(example.Test) + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // write point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. 
+func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + p := new(uint32) + *p = v + return p +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
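+//
+// A sketch of typical use (enc is assumed to hold wire-format bytes, e.g. from Marshal):
+//
+//	enc, _ := Marshal(msg)
+//	NewBuffer(nil).DebugPrint("my message", enc)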
+func (o *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := o.buf + index := o.index + o.buf = b + o.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := o.index + if index == len(o.buf) { + break + } + + op, err := o.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = o.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = o.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = o.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + break + + case WireVarint: + u, err = o.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + if err != nil { + fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + if err != nil { + fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", o.index, depth) + } + fmt.Printf("\n") + + o.buf = obuf + o.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
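+		// Each case below allocates a fresh value, so distinct messages never share
+		// the storage that backs their default fields.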
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + if f.IsNil() { + continue + } + // f is *T or []*T + if f.Kind() == reflect.Ptr { + setDefaults(f, recur, zeros) + } else { + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +func ptrToStruct(t reflect.Type) bool { + return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct +} + +// t is a struct type. 
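+// buildDefaultMessage records which of t's fields are nested messages and which
+// are scalars with a proto-declared default value; setDefaults caches the result
+// in the defaults map so this work happens once per message type.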
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + // nested messages + if ptrToStruct(ft) || (ft.Kind() == reflect.Slice && ptrToStruct(ft.Elem())) { + dm.nested = append(dm.nested, fi) + continue + } + + sf := scalarField{ + index: fi, + kind: ft.Elem().Kind(), + } + + // scalar fields without defaults + if prop.Default == "" { + dm.scalars = append(dm.scalars, sf) + continue + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + log.Printf("proto: bad default bool %q: %v", prop.Default, err) + continue + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + log.Printf("proto: bad default float32 %q: %v", prop.Default, err) + continue + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + log.Printf("proto: bad default float64 %q: %v", prop.Default, err) + continue + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + log.Printf("proto: bad default int32 %q: %v", prop.Default, err) + continue + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + log.Printf("proto: bad default int64 %q: %v", prop.Default, err) + continue + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + log.Printf("proto: bad default uint32 %q: %v", prop.Default, err) + continue + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + log.Printf("proto: bad default uint64 %q: %v", prop.Default, err) + continue + } + sf.value = x + default: + log.Printf("proto: unhandled def kind %v", ft.Elem().Kind()) + continue + } + + dm.scalars = append(dm.scalars, sf) + } + + return dm +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/lib_gogo.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/lib_gogo.go new file mode 100644 index 000000000..06278e7f3 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/lib_gogo.go @@ -0,0 +1,40 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://code.google.com/p/gogoprotobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "encoding/json" + "strconv" +) + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/message_set.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/message_set.go new file mode 100644 index 000000000..6ddcc30ad --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/message_set.go @@ -0,0 +1,216 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "errors" + "reflect" + "sort" +) + +// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and MessageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. 
We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. +// +// When a proto1 proto has a field that looks like: +// optional message info = 3; +// the protocol compiler produces a field in the generated struct that looks like: +// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` +// The package is automatically inserted so there is no need for that proto file to +// import this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type MessageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure MessageSet is a Message. +var _ Message = (*MessageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *MessageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *MessageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *MessageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return ErrNoMessageTypeId + } + return nil // TODO: return error instead? +} + +func (ms *MessageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return ErrNoMessageTypeId + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *MessageSet) Reset() { *ms = MessageSet{} } +func (ms *MessageSet) String() string { return CompactTextString(ms) } +func (*MessageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { + if err := encodeExtensionMap(m); err != nil { + return nil, err + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. 
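+//
+// A rough sketch of the round trip (buf is assumed to hold a marshaled MessageSet):
+//
+//	exts := make(map[int32]Extension)
+//	if err := UnmarshalMessageSet(buf, exts); err != nil {
+//		// buf did not contain a valid MessageSet
+//	}
+//	enc, err := MarshalMessageSet(exts) // re-encodes the same extensions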
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { + ms := new(MessageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + // restore wire type and field number varint, plus length varint. + b := EncodeVarint(uint64(*item.TypeId)<<3 | WireBytes) + b = append(b, EncodeVarint(uint64(len(item.Message)))...) + b = append(b, item.Message...) + + m[*item.TypeId] = Extension{enc: b} + } + return nil +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(i messageTypeIder, name string) { + messageSetMap[i.MessageTypeId()] = messageSetDesc{ + t: reflect.TypeOf(i), + name: name, + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..61141ba85 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/pointer_reflect.go @@ -0,0 +1,384 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine,!appenginevm + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. 
+func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. 
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
+type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. +type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..27a536c88 --- /dev/null +++ 
b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/pointer_unsafe.go @@ -0,0 +1,218 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine appenginevm + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. 
+func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. 
+type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/pointer_unsafe_gogo.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 000000000..befeeed68 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,166 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://code.google.com/p/gogoprotobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + return r.Interface() +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + if r.Elem().IsNil() { + return nil + } + return r.Elem().Interface() +} + +func copyUintPtr(oldptr, newptr uintptr, size int) { + oldbytes := make([]byte, 0) + oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) + oldslice.Data = oldptr + oldslice.Len = size + oldslice.Cap = size + newbytes := make([]byte, 0) + newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) + newslice.Data = newptr + newslice.Len = size + newslice.Cap = size + copy(newbytes, oldbytes) +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + copyUintPtr(uintptr(oldptr), uintptr(newptr), size) +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + size := typ.Elem().Size() + oldHeader := structPointer_GetSliceHeader(base, f) + newLen := oldHeader.Len + 1 + slice := reflect.MakeSlice(typ, newLen, newLen) + bas := toStructPointer(slice) + for i := 0; i < oldHeader.Len; i++ { + newElemptr := uintptr(bas) + uintptr(i)*size + oldElemptr := oldHeader.Data + uintptr(i)*size + copyUintPtr(oldElemptr, newElemptr, int(size)) + } + + oldHeader.Data = uintptr(bas) + oldHeader.Len = newLen + oldHeader.Cap = newLen + + return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) +} + +// RefBool returns a *bool field in the struct. +func structPointer_RefBool(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// RefString returns the address of a string field in the struct. +func structPointer_RefString(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_Add(p structPointer, size field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) +} + +func structPointer_Len(p structPointer, f field) int { + return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) +} + +// refWord32 is the address of a 32-bit value field. +type refWord32 *uint32 + +func refWord32_IsNil(p refWord32) bool { + return p == nil +} + +func refWord32_Set(p refWord32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +func refWord32_Get(p refWord32) uint32 { + return *p +} + +func structPointer_RefWord32(p structPointer, f field) refWord32 { + return refWord32((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// refWord64 is like refWord32 but for 32-bit values. 
+type refWord64 *uint64 + +func refWord64_Set(p refWord64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func refWord64_IsNil(p refWord64) bool { + return p == nil +} + +func refWord64_Get(p refWord64) uint64 { + return *p +} + +func structPointer_RefWord64(p structPointer, f field) refWord64 { + return refWord64((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/properties.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/properties.go new file mode 100644 index 000000000..d77530b7f --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/properties.go @@ -0,0 +1,670 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://code.google.com/p/gogoprotobuf/gogoproto +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. 
+type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + Default string // default value + CustomType string + def_uint64 uint64 + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sstype reflect.Type // set for slices of structs types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + if p.OrigName != p.Name { + s += ",name=" + p.OrigName + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if len(p.Default) > 0 { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case strings.HasPrefix(f, "def="): + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. 
+ p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. +func (p *Properties) setEncAndDec(typ reflect.Type, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + if len(p.CustomType) > 0 { + p.setCustomEncAndDec(typ) + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + default: + if !p.setNonNullableEncAndDec(t1) { + fmt.Fprintf(os.Stderr, "proto: no coders for %T\n", t1) + } + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %T -> %T\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32, reflect.Uint32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_int32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch t2.Bits() { + case 32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case 8: + if t2.Kind() == reflect.Uint8 { + p.enc = (*Buffer).enc_slice_byte + p.dec = (*Buffer).dec_slice_byte + p.size = size_slice_byte + } + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 
+ p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + case reflect.Struct: + p.setSliceOfNonPointerStructs(t1) + } + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. +func isMarshaler(t reflect.Type) bool { + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, lockGetProp) +} + +var ( + mutex sync.Mutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +func GetProperties(t reflect.Type) *StructProperties { + mutex.Lock() + sprop := getPropertiesLocked(t) + mutex.Unlock() + return sprop +} + +// getPropertiesLocked requires that mutex is held. 
+func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_extensions" { // special case + if len(f.Tag.Get("protobuf")) > 0 { + p.enc = (*Buffer).enc_ext_slice_byte + p.dec = nil // not needed + p.size = size_ext_slice_byte + } else { + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + } + if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. 
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/properties_gogo.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/properties_gogo.go new file mode 100644 index 000000000..08498e6dc --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/properties_gogo.go @@ -0,0 +1,107 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://code.google.com/p/gogoprotobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "os" + "reflect" +) + +func (p *Properties) setCustomEncAndDec(typ reflect.Type) { + p.ctype = typ + if p.Repeated { + p.enc = (*Buffer).enc_custom_slice_bytes + p.dec = (*Buffer).dec_custom_slice_bytes + p.size = size_custom_slice_bytes + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_custom_bytes + p.dec = (*Buffer).dec_custom_bytes + p.size = size_custom_bytes + } else { + p.enc = (*Buffer).enc_custom_ref_bytes + p.dec = (*Buffer).dec_custom_ref_bytes + p.size = size_custom_ref_bytes + } +} + +func (p *Properties) setNonNullableEncAndDec(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Bool: + p.enc = (*Buffer).enc_ref_bool + p.dec = (*Buffer).dec_ref_bool + p.size = size_ref_bool + case reflect.Int32, reflect.Uint32: + p.enc = (*Buffer).enc_ref_int32 + p.dec = (*Buffer).dec_ref_int32 + p.size = size_ref_int32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_ref_int64 + p.dec = (*Buffer).dec_ref_int64 + p.size = size_ref_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_ref_int32 // can just treat them as bits + p.dec = (*Buffer).dec_ref_int32 + p.size = size_ref_int32 + case reflect.Float64: + p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits + p.dec = (*Buffer).dec_ref_int64 + p.size = size_ref_int64 + case reflect.String: + p.dec = (*Buffer).dec_ref_string + p.enc = (*Buffer).enc_ref_string + p.size = size_ref_string + case reflect.Struct: + p.stype = typ + p.isMarshaler = isMarshaler(typ) + p.isUnmarshaler = isUnmarshaler(typ) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_ref_struct_message + p.dec = (*Buffer).dec_ref_struct_message + p.size = size_ref_struct_message + } else { + fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) + } + default: + return false + } + return true +} + +func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { + t2 := typ.Elem() + p.sstype = typ + p.stype = t2 + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + p.enc = (*Buffer).enc_slice_ref_struct_message + p.dec = (*Buffer).dec_slice_ref_struct_message + p.size = size_slice_ref_struct_message + if p.Wire != "bytes" { + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/size2_test.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/size2_test.go new file mode 100644 index 000000000..55902a4a9 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/size2_test.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "testing" +) + +// This is a separate file and package from size_test.go because that one uses +// generated messages and thus may not be in package proto without having a circular +// dependency, whereas this file tests unexported details of size.go. + +func TestVarintSize(t *testing.T) { + // Check the edge cases carefully. + testCases := []struct { + n uint64 + size int + }{ + {0, 1}, + {1, 1}, + {127, 1}, + {128, 2}, + {16383, 2}, + {16384, 3}, + {1<<63 - 1, 9}, + {1 << 63, 10}, + } + for _, tc := range testCases { + size := sizeVarint(tc.n) + if size != tc.size { + t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/size_test.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/size_test.go new file mode 100644 index 000000000..f3a04e66d --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/size_test.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto_test + +import ( + "log" + "testing" + + pb "./testdata" + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" +) + +var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} + +// messageWithExtension2 is in equal_test.go. +var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} + +func init() { + if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + + // Force messageWithExtension3 to have the extension encoded. + Marshal(messageWithExtension3) + +} + +var SizeTests = []struct { + desc string + pb Message +}{ + {"empty", &pb.OtherMessage{}}, + // Basic types. + {"bool", &pb.Defaults{F_Bool: Bool(true)}}, + {"int32", &pb.Defaults{F_Int32: Int32(12)}}, + {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, + {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, + {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, + {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, + {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, + {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, + {"float", &pb.Defaults{F_Float: Float32(12.6)}}, + {"double", &pb.Defaults{F_Double: Float64(13.9)}}, + {"string", &pb.Defaults{F_String: String("niles")}}, + {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, + {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, + {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, + {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, + {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, + // Repeated. + {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, + {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, + {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, + {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729}}}, + {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, + {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ + // Need enough large numbers to verify that the header is counting the number of bytes + // for the field, not the number of elements. + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + }}}, + {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, + {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, + // Nested. + {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, + {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, + // Other things. 
+ {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, + {"extension (unencoded)", messageWithExtension1}, + {"extension (encoded)", messageWithExtension3}, +} + +func TestSize(t *testing.T) { + for _, tc := range SizeTests { + size := Size(tc.pb) + b, err := Marshal(tc.pb) + if err != nil { + t.Errorf("%v: Marshal failed: %v", tc.desc, err) + continue + } + if size != len(b) { + t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) + t.Logf("%v: bytes: %#v", tc.desc, b) + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/skip_gogo.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/skip_gogo.go new file mode 100644 index 000000000..31010d5c0 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/skip_gogo.go @@ -0,0 +1,117 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://code.google.com/p/gogoprotobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var wire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + if wireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/Makefile b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/Makefile new file mode 100644 index 000000000..4cdf08456 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/Makefile @@ -0,0 +1,47 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# http://code.google.com/p/goprotobuf/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +all: regenerate + +regenerate: + rm -f test.pb.go + protoc --gogo_out=. 
test.proto + +# The following rules are just aids to development. Not needed for typical testing. + +diff: regenerate + hg diff test.pb.go + +restore: + cp test.pb.go.golden test.pb.go + +preserve: + cp test.pb.go test.pb.go.golden diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/golden_test.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/golden_test.go new file mode 100644 index 000000000..5a8f7ef3d --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/golden_test.go @@ -0,0 +1,86 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Verify that the compiler output for test.proto is unchanged. + +package testdata + +import ( + "crypto/sha1" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. +func sum(t *testing.T, name string) string { + data, err := ioutil.ReadFile(name) + if err != nil { + t.Fatal(err) + } + t.Logf("sum(%q): length is %d", name, len(data)) + hash := sha1.New() + _, err = hash.Write(data) + if err != nil { + t.Fatal(err) + } + return fmt.Sprintf("% x", hash.Sum(nil)) +} + +func run(t *testing.T, name string, args ...string) { + cmd := exec.Command(name, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + if err != nil { + t.Fatal(err) + } +} + +func TestGolden(t *testing.T) { + // Compute the original checksum. + goldenSum := sum(t, "test.pb.go") + // Run the proto compiler. + run(t, "protoc", "--gogo_out="+os.TempDir(), "test.proto") + newFile := filepath.Join(os.TempDir(), "test.pb.go") + defer os.Remove(newFile) + // Compute the new checksum. 
+ newSum := sum(t, newFile) + // Verify + if newSum != goldenSum { + run(t, "diff", "-u", "test.pb.go", newFile) + t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/test.pb.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/test.pb.go new file mode 100644 index 000000000..3e5b974cb --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/test.pb.go @@ -0,0 +1,2324 @@ +// Code generated by protoc-gen-gogo. +// source: test.proto +// DO NOT EDIT! + +/* +Package testdata is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + GoEnum + GoTestField + GoTest + GoSkipTest + NonPackedTest + PackedTest + MaxTag + OldMessage + NewMessage + InnerMessage + OtherMessage + MyMessage + Ext + MyMessageSet + Empty + MessageList + Strings + Defaults + SubDefaults + RepeatedEnum + MoreRepeated + GroupOld + GroupNew + FloatingPoint +*/ +package testdata + +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type FOO int32 + +const ( + FOO_FOO1 FOO = 1 +) + +var FOO_name = map[int32]string{ + 1: "FOO1", +} +var FOO_value = map[string]int32{ + "FOO1": 1, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} + +// An enum, for completeness. 
+type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + // Basic types + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + // Groupings + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + // Table types + GoTest_TABLE GoTest_KIND = 11 + // Functions + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return err + } + *x = GoTest_KIND(value) + return nil +} + +type MyMessage_Color int32 + +const ( + MyMessage_RED MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} + +type Defaults_Color int32 + +const ( + Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) +} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = 
RepeatedEnum_Color(value) + return nil +} + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return FOO_FOO1 +} + +type GoTestField struct { + Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + // Some typical parameters + Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"` + // Required, repeated and optional foreign fields. + RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"` + // Required fields of all basic types + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` + // Repeated fields of all basic types + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 
`protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` + // Optional fields of all basic types + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` + // Default-valued fields of all basic types + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 
`protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + // Packed repeated fields (no string or bytes). + F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} + +const Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const 
Default_GoTest_F_Sint64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return GoTest_VOID +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func (m *GoTest) GetF_BoolRequired() bool { + if m != nil && m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func (m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return m.F_BytesRequired + } + return nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + 
return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) GetF_Int32Optional() int32 { + if m != nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m *GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return *m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil { + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 { + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted +} + +func (m *GoTest) GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted + } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return *m.F_Fixed64Defaulted + } + return Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted 
+} + +func (m *GoTest) GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) +} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +// Required, repeated, and optional groups. 
+type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} + +func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. +type GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest_SkipGroup) ProtoMessage() {} + +func (m 
*GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m *GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +type NonPackedTest struct { + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + // Maximum possible tag number. + LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} + +func (m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. 
+type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_InnerMessage_Port +} + +func (m *InnerMessage) GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return *m.Weight + } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + RepInner []*InnerMessage 
`protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` + // This field becomes [][]byte in the generated code. + RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} + +var extRange_MyMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} +func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetRepInner() []*InnerMessage { + if m != nil { + return m.RepInner + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return MyMessage_RED +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return *m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "testdata.Ext.more", + Tag: "bytes,103,opt,name=more", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: 
(*string)(nil), + Field: 104, + Name: "testdata.Ext.text", + Tag: "bytes,104,opt,name=text", +} + +var E_Ext_Number = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "testdata.Ext.number", + Tag: "varint,105,opt,name=number", +} + +type MyMessageSet struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } +func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } +func (*MyMessageSet) ProtoMessage() {} + +func (m *MyMessageSet) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(m.ExtensionMap()) +} +func (m *MyMessageSet) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) +} + +// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*MyMessageSet)(nil) +var _ proto.Unmarshaler = (*MyMessageSet)(nil) + +var extRange_MyMessageSet = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessageSet +} +func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() string { return proto.CompactTextString(m) } +func (*MessageList) ProtoMessage() {} + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. 
+ F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + // More fields with crazy defaults. + F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"` + // Sub-message. + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} + +const Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return *m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + return Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return 
*m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) +} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +type SubDefaults struct { + N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubDefaults) Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} + +func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() 
[]bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) GetIntsPacked() []int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetInt64SPacked() []int64 { + if m != nil { + return m.Int64SPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { + return m.Strings + } + return nil +} + +func (m *MoreRepeated) GetFixeds() []uint32 { + if m != nil { + return m.Fixeds + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,101,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,101,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type FloatingPoint struct { + F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } +func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } +func (*FloatingPoint) ProtoMessage() {} + +func (m *FloatingPoint) GetF() float64 { + if m != nil && m.F != nil { + return *m.F + } + return 0 +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "testdata.greeting", + Tag: "bytes,106,rep,name=greeting", +} + +var E_X201 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 201, + Name: "testdata.x201", + Tag: "bytes,201,opt,name=x201", +} + +var E_X202 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 202, + Name: "testdata.x202", + Tag: "bytes,202,opt,name=x202", +} + +var E_X203 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 203, + Name: "testdata.x203", + Tag: "bytes,203,opt,name=x203", +} + +var E_X204 = &proto.ExtensionDesc{ + ExtendedType: 
(*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 204, + Name: "testdata.x204", + Tag: "bytes,204,opt,name=x204", +} + +var E_X205 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 205, + Name: "testdata.x205", + Tag: "bytes,205,opt,name=x205", +} + +var E_X206 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 206, + Name: "testdata.x206", + Tag: "bytes,206,opt,name=x206", +} + +var E_X207 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 207, + Name: "testdata.x207", + Tag: "bytes,207,opt,name=x207", +} + +var E_X208 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 208, + Name: "testdata.x208", + Tag: "bytes,208,opt,name=x208", +} + +var E_X209 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 209, + Name: "testdata.x209", + Tag: "bytes,209,opt,name=x209", +} + +var E_X210 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 210, + Name: "testdata.x210", + Tag: "bytes,210,opt,name=x210", +} + +var E_X211 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 211, + Name: "testdata.x211", + Tag: "bytes,211,opt,name=x211", +} + +var E_X212 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 212, + Name: "testdata.x212", + Tag: "bytes,212,opt,name=x212", +} + +var E_X213 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 213, + Name: "testdata.x213", + Tag: "bytes,213,opt,name=x213", +} + +var E_X214 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 214, + Name: "testdata.x214", + Tag: "bytes,214,opt,name=x214", +} + +var E_X215 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 215, + Name: "testdata.x215", + Tag: "bytes,215,opt,name=x215", +} + +var E_X216 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 216, + Name: "testdata.x216", + Tag: "bytes,216,opt,name=x216", +} + +var E_X217 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 217, + Name: "testdata.x217", + Tag: "bytes,217,opt,name=x217", +} + +var E_X218 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 218, + Name: "testdata.x218", + Tag: "bytes,218,opt,name=x218", +} + +var E_X219 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 219, + Name: "testdata.x219", + Tag: "bytes,219,opt,name=x219", +} + +var E_X220 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 220, + Name: "testdata.x220", + Tag: "bytes,220,opt,name=x220", +} + +var E_X221 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 221, + Name: "testdata.x221", + Tag: "bytes,221,opt,name=x221", +} + +var E_X222 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 222, + Name: "testdata.x222", + Tag: "bytes,222,opt,name=x222", +} + +var E_X223 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + 
Field: 223, + Name: "testdata.x223", + Tag: "bytes,223,opt,name=x223", +} + +var E_X224 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 224, + Name: "testdata.x224", + Tag: "bytes,224,opt,name=x224", +} + +var E_X225 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 225, + Name: "testdata.x225", + Tag: "bytes,225,opt,name=x225", +} + +var E_X226 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 226, + Name: "testdata.x226", + Tag: "bytes,226,opt,name=x226", +} + +var E_X227 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 227, + Name: "testdata.x227", + Tag: "bytes,227,opt,name=x227", +} + +var E_X228 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 228, + Name: "testdata.x228", + Tag: "bytes,228,opt,name=x228", +} + +var E_X229 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 229, + Name: "testdata.x229", + Tag: "bytes,229,opt,name=x229", +} + +var E_X230 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 230, + Name: "testdata.x230", + Tag: "bytes,230,opt,name=x230", +} + +var E_X231 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 231, + Name: "testdata.x231", + Tag: "bytes,231,opt,name=x231", +} + +var E_X232 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 232, + Name: "testdata.x232", + Tag: "bytes,232,opt,name=x232", +} + +var E_X233 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 233, + Name: "testdata.x233", + Tag: "bytes,233,opt,name=x233", +} + +var E_X234 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 234, + Name: "testdata.x234", + Tag: "bytes,234,opt,name=x234", +} + +var E_X235 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 235, + Name: "testdata.x235", + Tag: "bytes,235,opt,name=x235", +} + +var E_X236 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 236, + Name: "testdata.x236", + Tag: "bytes,236,opt,name=x236", +} + +var E_X237 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 237, + Name: "testdata.x237", + Tag: "bytes,237,opt,name=x237", +} + +var E_X238 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 238, + Name: "testdata.x238", + Tag: "bytes,238,opt,name=x238", +} + +var E_X239 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 239, + Name: "testdata.x239", + Tag: "bytes,239,opt,name=x239", +} + +var E_X240 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 240, + Name: "testdata.x240", + Tag: "bytes,240,opt,name=x240", +} + +var E_X241 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 241, + Name: "testdata.x241", + Tag: "bytes,241,opt,name=x241", +} + +var E_X242 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 242, + Name: "testdata.x242", + Tag: 
"bytes,242,opt,name=x242", +} + +var E_X243 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 243, + Name: "testdata.x243", + Tag: "bytes,243,opt,name=x243", +} + +var E_X244 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 244, + Name: "testdata.x244", + Tag: "bytes,244,opt,name=x244", +} + +var E_X245 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 245, + Name: "testdata.x245", + Tag: "bytes,245,opt,name=x245", +} + +var E_X246 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 246, + Name: "testdata.x246", + Tag: "bytes,246,opt,name=x246", +} + +var E_X247 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 247, + Name: "testdata.x247", + Tag: "bytes,247,opt,name=x247", +} + +var E_X248 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 248, + Name: "testdata.x248", + Tag: "bytes,248,opt,name=x248", +} + +var E_X249 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 249, + Name: "testdata.x249", + Tag: "bytes,249,opt,name=x249", +} + +var E_X250 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 250, + Name: "testdata.x250", + Tag: "bytes,250,opt,name=x250", +} + +func init() { + proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) + proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) + proto.RegisterExtension(E_X201) + proto.RegisterExtension(E_X202) + proto.RegisterExtension(E_X203) + proto.RegisterExtension(E_X204) + proto.RegisterExtension(E_X205) + proto.RegisterExtension(E_X206) + proto.RegisterExtension(E_X207) + proto.RegisterExtension(E_X208) + proto.RegisterExtension(E_X209) + proto.RegisterExtension(E_X210) + proto.RegisterExtension(E_X211) + proto.RegisterExtension(E_X212) + proto.RegisterExtension(E_X213) + proto.RegisterExtension(E_X214) + proto.RegisterExtension(E_X215) + proto.RegisterExtension(E_X216) + proto.RegisterExtension(E_X217) + proto.RegisterExtension(E_X218) + proto.RegisterExtension(E_X219) + proto.RegisterExtension(E_X220) + proto.RegisterExtension(E_X221) + proto.RegisterExtension(E_X222) + proto.RegisterExtension(E_X223) + proto.RegisterExtension(E_X224) + proto.RegisterExtension(E_X225) + proto.RegisterExtension(E_X226) + proto.RegisterExtension(E_X227) + proto.RegisterExtension(E_X228) + proto.RegisterExtension(E_X229) + proto.RegisterExtension(E_X230) + proto.RegisterExtension(E_X231) + proto.RegisterExtension(E_X232) + proto.RegisterExtension(E_X233) + proto.RegisterExtension(E_X234) + proto.RegisterExtension(E_X235) + proto.RegisterExtension(E_X236) + proto.RegisterExtension(E_X237) + proto.RegisterExtension(E_X238) + proto.RegisterExtension(E_X239) + proto.RegisterExtension(E_X240) + proto.RegisterExtension(E_X241) + proto.RegisterExtension(E_X242) + 
proto.RegisterExtension(E_X243) + proto.RegisterExtension(E_X244) + proto.RegisterExtension(E_X245) + proto.RegisterExtension(E_X246) + proto.RegisterExtension(E_X247) + proto.RegisterExtension(E_X248) + proto.RegisterExtension(E_X249) + proto.RegisterExtension(E_X250) +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/test.pb.go.golden b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/test.pb.go.golden new file mode 100644 index 000000000..b79ce68e1 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/test.pb.go.golden @@ -0,0 +1,1737 @@ +// Code generated by protoc-gen-gogo. +// source: test.proto +// DO NOT EDIT! + +package testdata + +import proto "code.google.com/p/gogoprotobuf/proto" +import json "encoding/json" +import math "math" + +import () + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. +var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +type FOO int32 + +const ( + FOO_FOO1 FOO = 1 +) + +var FOO_name = map[int32]string{ + 1: "FOO1", +} +var FOO_value = map[string]int32{ + "FOO1": 1, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x FOO) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} + +type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + GoTest_TABLE GoTest_KIND = 11 + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x GoTest_KIND) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return err + } + *x = GoTest_KIND(value) + return nil +} + +type MyMessage_Color int32 + +const ( + MyMessage_RED MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x 
MyMessage_Color) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} + +type Defaults_Color int32 + +const ( + Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) +} +func (x Defaults_Color) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x RepeatedEnum_Color) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = RepeatedEnum_Color(value) + return nil +} + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return 0 +} + +type GoTestField struct { + Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"` + RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"` + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` 
+ F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" 
json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" 
json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} + +const Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const Default_GoTest_F_Sint64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return 0 +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func (m *GoTest) GetF_BoolRequired() bool { + if m != nil && m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func 
(m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return m.F_BytesRequired + } + return nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) GetF_Int32Optional() int32 { + if m != nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m *GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return *m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil { + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return 
m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 { + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted +} + +func (m *GoTest) GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted + } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return *m.F_Fixed64Defaulted + } + return Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted +} + +func (m *GoTest) GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
+} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } + +func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 
`protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } + +func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m *GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +type NonPackedTest struct { + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte 
`json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} + +func (m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_InnerMessage_Port +} + +func (m *InnerMessage) GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return *m.Weight + } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string 
`protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` + RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} + +var extRange_MyMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} +func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return 0 +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return *m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "testdata.Ext.more", + Tag: "bytes,103,opt,name=more", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*string)(nil), + Field: 104, + Name: "testdata.Ext.text", + Tag: 
"bytes,104,opt,name=text", +} + +var E_Ext_Number = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "testdata.Ext.number", + Tag: "varint,105,opt,name=number", +} + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() string { return proto.CompactTextString(m) } +func (*MessageList) ProtoMessage() {} + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"` + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} + +const 
Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return *m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + return Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return *m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) 
+} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +type SubDefaults struct { + N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubDefaults) Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} + +func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() []bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) GetIntsPacked() []int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { + return m.Strings + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,1,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" 
json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,1,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "testdata.greeting", + Tag: "bytes,106,rep,name=greeting", +} + +func init() { + proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) + proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/test.proto b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/test.proto new file mode 100644 index 000000000..4f4b3d168 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/testdata/test.proto @@ -0,0 +1,420 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A feature-rich test file for the protocol compiler and libraries. + +syntax = "proto2"; + +package testdata; + +enum FOO { FOO1 = 1; }; + +message GoEnum { + required FOO foo = 1; +} + +message GoTestField { + required string Label = 1; + required string Type = 2; +} + +message GoTest { + // An enum, for completeness. + enum KIND { + VOID = 0; + + // Basic types + BOOL = 1; + BYTES = 2; + FINGERPRINT = 3; + FLOAT = 4; + INT = 5; + STRING = 6; + TIME = 7; + + // Groupings + TUPLE = 8; + ARRAY = 9; + MAP = 10; + + // Table types + TABLE = 11; + + // Functions + FUNCTION = 12; // last tag + }; + + // Some typical parameters + required KIND Kind = 1; + optional string Table = 2; + optional int32 Param = 3; + + // Required, repeated and optional foreign fields. + required GoTestField RequiredField = 4; + repeated GoTestField RepeatedField = 5; + optional GoTestField OptionalField = 6; + + // Required fields of all basic types + required bool F_Bool_required = 10; + required int32 F_Int32_required = 11; + required int64 F_Int64_required = 12; + required fixed32 F_Fixed32_required = 13; + required fixed64 F_Fixed64_required = 14; + required uint32 F_Uint32_required = 15; + required uint64 F_Uint64_required = 16; + required float F_Float_required = 17; + required double F_Double_required = 18; + required string F_String_required = 19; + required bytes F_Bytes_required = 101; + required sint32 F_Sint32_required = 102; + required sint64 F_Sint64_required = 103; + + // Repeated fields of all basic types + repeated bool F_Bool_repeated = 20; + repeated int32 F_Int32_repeated = 21; + repeated int64 F_Int64_repeated = 22; + repeated fixed32 F_Fixed32_repeated = 23; + repeated fixed64 F_Fixed64_repeated = 24; + repeated uint32 F_Uint32_repeated = 25; + repeated uint64 F_Uint64_repeated = 26; + repeated float F_Float_repeated = 27; + repeated double F_Double_repeated = 28; + repeated string F_String_repeated = 29; + repeated bytes F_Bytes_repeated = 201; + repeated sint32 F_Sint32_repeated = 202; + repeated sint64 F_Sint64_repeated = 203; + + // Optional fields of all basic types + optional bool F_Bool_optional = 30; + optional int32 F_Int32_optional = 31; + optional int64 F_Int64_optional = 32; + optional fixed32 F_Fixed32_optional = 33; + optional fixed64 F_Fixed64_optional = 34; + optional uint32 F_Uint32_optional = 35; + optional uint64 F_Uint64_optional = 36; + optional float F_Float_optional = 37; + optional double F_Double_optional = 38; + optional string F_String_optional = 39; + optional bytes F_Bytes_optional = 301; + optional sint32 F_Sint32_optional = 302; + optional sint64 F_Sint64_optional = 303; + + // Default-valued fields of all basic types + optional bool F_Bool_defaulted = 40 [default=true]; + optional int32 F_Int32_defaulted = 41 [default=32]; + optional int64 F_Int64_defaulted = 42 [default=64]; + optional fixed32 F_Fixed32_defaulted = 43 [default=320]; + optional fixed64 F_Fixed64_defaulted = 44 [default=640]; + optional uint32 F_Uint32_defaulted 
= 45 [default=3200]; + optional uint64 F_Uint64_defaulted = 46 [default=6400]; + optional float F_Float_defaulted = 47 [default=314159.]; + optional double F_Double_defaulted = 48 [default=271828.]; + optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; + optional sint32 F_Sint32_defaulted = 402 [default = -32]; + optional sint64 F_Sint64_defaulted = 403 [default = -64]; + + // Packed repeated fields (no string or bytes). + repeated bool F_Bool_repeated_packed = 50 [packed=true]; + repeated int32 F_Int32_repeated_packed = 51 [packed=true]; + repeated int64 F_Int64_repeated_packed = 52 [packed=true]; + repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; + repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; + repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; + repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; + repeated float F_Float_repeated_packed = 57 [packed=true]; + repeated double F_Double_repeated_packed = 58 [packed=true]; + repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; + repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; + + // Required, repeated, and optional groups. + required group RequiredGroup = 70 { + required string RequiredField = 71; + }; + + repeated group RepeatedGroup = 80 { + required string RequiredField = 81; + }; + + optional group OptionalGroup = 90 { + required string RequiredField = 91; + }; +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. +message GoSkipTest { + required int32 skip_int32 = 11; + required fixed32 skip_fixed32 = 12; + required fixed64 skip_fixed64 = 13; + required string skip_string = 14; + required group SkipGroup = 15 { + required int32 group_int32 = 16; + required string group_string = 17; + } +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +message NonPackedTest { + repeated int32 a = 1; +} + +message PackedTest { + repeated int32 b = 1 [packed=true]; +} + +message MaxTag { + // Maximum possible tag number. + optional string last_field = 536870911; +} + +message OldMessage { + message Nested { + optional string name = 1; + } + optional Nested nested = 1; +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +message NewMessage { + message Nested { + optional string name = 1; + optional string food_group = 2; + } + optional Nested nested = 1; +} + +// Smaller tests for ASCII formatting. + +message InnerMessage { + required string host = 1; + optional int32 port = 2 [default=4000]; + optional bool connected = 3; +} + +message OtherMessage { + optional int64 key = 1; + optional bytes value = 2; + optional float weight = 3; + optional InnerMessage inner = 4; +} + +message MyMessage { + required int32 count = 1; + optional string name = 2; + optional string quote = 3; + repeated string pet = 4; + optional InnerMessage inner = 5; + repeated OtherMessage others = 6; + repeated InnerMessage rep_inner = 12; + + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color bikeshed = 7; + + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // This field becomes [][]byte in the generated code. 
+ repeated bytes rep_bytes = 10; + + optional double bigfloat = 11; + + extensions 100 to max; +} + +message Ext { + extend MyMessage { + optional Ext more = 103; + optional string text = 104; + optional int32 number = 105; + } + + optional string data = 1; +} + +extend MyMessage { + repeated string greeting = 106; +} + +message MyMessageSet { + option message_set_wire_format = true; + extensions 100 to max; +} + +message Empty { +} + +extend MyMessageSet { + optional Empty x201 = 201; + optional Empty x202 = 202; + optional Empty x203 = 203; + optional Empty x204 = 204; + optional Empty x205 = 205; + optional Empty x206 = 206; + optional Empty x207 = 207; + optional Empty x208 = 208; + optional Empty x209 = 209; + optional Empty x210 = 210; + optional Empty x211 = 211; + optional Empty x212 = 212; + optional Empty x213 = 213; + optional Empty x214 = 214; + optional Empty x215 = 215; + optional Empty x216 = 216; + optional Empty x217 = 217; + optional Empty x218 = 218; + optional Empty x219 = 219; + optional Empty x220 = 220; + optional Empty x221 = 221; + optional Empty x222 = 222; + optional Empty x223 = 223; + optional Empty x224 = 224; + optional Empty x225 = 225; + optional Empty x226 = 226; + optional Empty x227 = 227; + optional Empty x228 = 228; + optional Empty x229 = 229; + optional Empty x230 = 230; + optional Empty x231 = 231; + optional Empty x232 = 232; + optional Empty x233 = 233; + optional Empty x234 = 234; + optional Empty x235 = 235; + optional Empty x236 = 236; + optional Empty x237 = 237; + optional Empty x238 = 238; + optional Empty x239 = 239; + optional Empty x240 = 240; + optional Empty x241 = 241; + optional Empty x242 = 242; + optional Empty x243 = 243; + optional Empty x244 = 244; + optional Empty x245 = 245; + optional Empty x246 = 246; + optional Empty x247 = 247; + optional Empty x248 = 248; + optional Empty x249 = 249; + optional Empty x250 = 250; +} + +message MessageList { + repeated group Message = 1 { + required string name = 2; + required int32 count = 3; + } +} + +message Strings { + optional string string_field = 1; + optional bytes bytes_field = 2; +} + +message Defaults { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + optional bool F_Bool = 1 [default=true]; + optional int32 F_Int32 = 2 [default=32]; + optional int64 F_Int64 = 3 [default=64]; + optional fixed32 F_Fixed32 = 4 [default=320]; + optional fixed64 F_Fixed64 = 5 [default=640]; + optional uint32 F_Uint32 = 6 [default=3200]; + optional uint64 F_Uint64 = 7 [default=6400]; + optional float F_Float = 8 [default=314159.]; + optional double F_Double = 9 [default=271828.]; + optional string F_String = 10 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes = 11 [default="Bignose"]; + optional sint32 F_Sint32 = 12 [default=-32]; + optional sint64 F_Sint64 = 13 [default=-64]; + optional Color F_Enum = 14 [default=GREEN]; + + // More fields with crazy defaults. + optional float F_Pinf = 15 [default=inf]; + optional float F_Ninf = 16 [default=-inf]; + optional float F_Nan = 17 [default=nan]; + + // Sub-message. 
+ optional SubDefaults sub = 18; +} + +message SubDefaults { + optional int64 n = 1 [default=7]; +} + +message RepeatedEnum { + enum Color { + RED = 1; + } + repeated Color color = 1; +} + +message MoreRepeated { + repeated bool bools = 1; + repeated bool bools_packed = 2 [packed=true]; + repeated int32 ints = 3; + repeated int32 ints_packed = 4 [packed=true]; + repeated int64 int64s_packed = 7 [packed=true]; + repeated string strings = 5; + repeated fixed32 fixeds = 6; +} + +// GroupOld and GroupNew have the same wire format. +// GroupNew has a new field inside a group. + +message GroupOld { + optional group G = 101 { + optional int32 x = 2; + } +} + +message GroupNew { + optional group G = 101 { + optional int32 x = 2; + optional int32 y = 3; + } +} + +message FloatingPoint { + required double f = 1; +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text.go new file mode 100644 index 000000000..e88badd72 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text.go @@ -0,0 +1,736 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://code.google.com/p/gogoprotobuf/gogoproto +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
+ +import ( + "bufio" + "bytes" + "fmt" + "io" + "log" + "math" + "os" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +// textMarshaler is implemented by Messages that can marshal themsleves. +// It is identical to encoding.TextMarshaler, introduced in go 1.2, +// which will eventually replace it. +type textMarshaler interface { + MarshalText() (text []byte, err error) +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Printf("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +var ( + messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem() +) + +// raw is the interface satisfied by RawMessage. 
+type raw interface { + Bytes() []byte +} + +func writeStruct(w *textWriter, sv reflect.Value) error { + if sv.Type() == messageSetType { + return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) + } + + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := writeEnum(w, v, props); err != nil { + return err + } + } else if err := writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + if len(props.Enum) > 0 { + if err := writeEnum(w, fv, props); err != nil { + return err + } + } else if err := writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if pv.Type().Implements(extendableProtoType) { + if err := writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. +func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil && len(props.CustomType) > 0 { + var custom Marshaler = v.Interface().(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + + // Floats have special cases. 
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Interface().([]byte))); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if tm, ok := v.Interface().(textMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeMessageSet(w *textWriter, ms *MessageSet) error { + for _, item := range ms.Item { + id := *item.TypeId + if msd, ok := messageSetMap[id]; ok { + // Known message set type. + if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { + return err + } + w.indent() + + pb := reflect.New(msd.t.Elem()) + if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { + if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { + return err + } + } else { + if err := writeStruct(w, pb.Elem()); err != nil { + return err + } + } + } else { + // Unknown type. 
+ if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { + return err + } + w.indent() + if err := writeUnknownStruct(w, item.Message); err != nil { + return err + } + } + w.unindent() + if _, err := w.Write(gtNewline); err != nil { + return err + } + } + return nil +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep := pv.Interface().(extendableProto) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + var m map[int32]Extension + if em, ok := ep.(extensionsMap); ok { + m = em.ExtensionMap() + } else if em, ok := ep.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + } + + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil { + return err + } + continue + } + + // Repeated extensions will appear as a slice. 
+ if !desc.repeated() { + if err := writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +func marshalText(w io.Writer, pb Message, compact bool) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: compact, + } + + if tm, ok := pb.(textMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { + return marshalText(w, pb, false) +} + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, false) + return buf.String() +} + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, true) + return buf.String() +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_gogo.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_gogo.go new file mode 100644 index 000000000..3c4e469b2 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_gogo.go @@ -0,0 +1,55 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://code.google.com/p/gogoprotobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" +) + +func writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_parser.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_parser.go new file mode 100644 index 000000000..37be7c9a6 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_parser.go @@ -0,0 +1,727 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://code.google.com/p/gogoprotobuf/gogoproto +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// textUnmarshaler is implemented by Messages that can unmarshal themsleves. +// It is identical to encoding.TextUnmarshaler, introduced in go 1.2, +// which will eventually replace it. +type textUnmarshaler interface { + UnmarshalText(text []byte) error +} + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %v", p.s[0:i+1]) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + 
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || p.s[0] != '"' { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +// Return an error indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *ParseError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return p.errorf("message %v missing required field %q", st, props.OrigName) + } + } + return p.errorf("message %v missing required field", st) // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { + sprops := GetProperties(st) + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) *ParseError { + st := sv.Type() + reqCount := GetProperties(st).reqCount + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]". + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + tok = p.next() + if tok.err != nil { + return tok.err + } + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == tok.value { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", tok.value) + } + // Check the extension terminator. 
+ tok = p.next() + if tok.err != nil { + return tok.err + } + if tok.value != "]" { + return p.errorf("unrecognized extension terminator %q", tok.value) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + return err + } + ep := sv.Addr().Interface().(extendableProto) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + } else { + // This is a normal, non-extension field. + fi, props, ok := structFieldByName(st, tok.value) + if !ok { + return p.errorf("unknown field name %q in %v", tok.value, st) + } + + dst := sv.Field(fi) + isDstNil := isNil(dst) + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && !isDstNil && dst.Kind() == reflect.Ptr { + return p.errorf("non-repeated field %q was repeated", tok.value) + } + + if err := p.checkForColon(props, st.Field(fi).Type); err != nil { + return err + } + + // Parse into the field. + if err := p.readAny(dst, props); err != nil { + return err + } + + if props.Required { + reqCount-- + } + } + + // For backward compatibility, permit a semicolon or comma after a field. + tok = p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + if len(props.CustomType) > 0 { + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + tc := reflect.TypeOf(new(Marshaler)) + ok := t.Elem().Implements(tc.Elem()) + if ok { + fv := v + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.ValueOf(custom)) + } else { + custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.Indirect(reflect.ValueOf(custom))) + } + return nil + } + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. 
+ return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. May already exist. + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(at, flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + case reflect.Bool: + // Either "true", "false", 1 or 0. + switch tok.value { + case "true", "1": + fv.SetBool(true) + return nil + case "false", "0": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement textUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(textUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_parser_test.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_parser_test.go new file mode 100644 index 000000000..2c9eaafc3 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_parser_test.go @@ -0,0 +1,462 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "math" + "reflect" + "testing" + + . "./testdata" + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" +) + +type UnmarshalTextTest struct { + in string + err string // if "", no error expected + out *MyMessage +} + +func buildExtStructTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_More, &Ext{ + Data: String("Hello, world!"), + }) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtDataTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_Text, String("Hello, world!")) + SetExtension(msg, E_Ext_Number, Int32(1729)) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtRepStringTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { + panic(err) + } + return UnmarshalTextTest{in: text, out: msg} +} + +var unMarshalTextTests = []UnmarshalTextTest{ + // Basic + { + in: " count:42\n name:\"Dave\" ", + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + }, + }, + + // Empty quoted string + { + in: `count:42 name:""`, + out: &MyMessage{ + Count: Int32(42), + Name: String(""), + }, + }, + + // Quoted string concatenation + { + in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string with escaped apostrophe + { + in: `count:42 name: "HOLIDAY - New Year\'s Day"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("HOLIDAY - New Year's Day"), + }, + }, + + // Quoted string with single quote + { + in: `count:42 name: 'Roger "The Ramster" Ramjet'`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`Roger "The Ramster" Ramjet`), + }, + }, + + // Quoted string with all the accepted special characters from the C++ test + { + in: 
`count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), + }, + }, + + // Quoted string with quoted backslash + { + in: `count:42 name: "\\'xyz"`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`\'xyz`), + }, + }, + + // Quoted string with UTF-8 bytes. + { + in: "count:42 name: '\303\277\302\201\xAB'", + out: &MyMessage{ + Count: Int32(42), + Name: String("\303\277\302\201\xAB"), + }, + }, + + // Bad quoted string + { + in: `inner: < host: "\0" >` + "\n", + err: `line 1.15: invalid quoted string "\0"`, + }, + + // Number too large for int64 + { + in: "count: 1 others { key: 123456789012345678901 }", + err: "line 1.23: invalid int64: 123456789012345678901", + }, + + // Number too large for int32 + { + in: "count: 1234567890123", + err: "line 1.7: invalid int32: 1234567890123", + }, + + // Number in hexadecimal + { + in: "count: 0x2beef", + out: &MyMessage{ + Count: Int32(0x2beef), + }, + }, + + // Number in octal + { + in: "count: 024601", + out: &MyMessage{ + Count: Int32(024601), + }, + }, + + // Floating point number with "f" suffix + { + in: "count: 4 others:< weight: 17.0f >", + out: &MyMessage{ + Count: Int32(4), + Others: []*OtherMessage{ + { + Weight: Float32(17), + }, + }, + }, + }, + + // Floating point positive infinity + { + in: "count: 4 bigfloat: inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(1)), + }, + }, + + // Floating point negative infinity + { + in: "count: 4 bigfloat: -inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(-1)), + }, + }, + + // Number too large for float32 + { + in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", + err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", + }, + + // Number posing as a quoted string + { + in: `inner: < host: 12 >` + "\n", + err: `line 1.15: invalid string: 12`, + }, + + // Quoted string posing as int32 + { + in: `count: "12"`, + err: `line 1.7: invalid int32: "12"`, + }, + + // Quoted string posing a float32 + { + in: `others:< weight: "17.4" >`, + err: `line 1.17: invalid float32: "17.4"`, + }, + + // Enum + { + in: `count:42 bikeshed: BLUE`, + out: &MyMessage{ + Count: Int32(42), + Bikeshed: MyMessage_BLUE.Enum(), + }, + }, + + // Repeated field + { + in: `count:42 pet: "horsey" pet:"bunny"`, + out: &MyMessage{ + Count: Int32(42), + Pet: []string{"horsey", "bunny"}, + }, + }, + + // Repeated message with/without colon and <>/{} + { + in: `count:42 others:{} others{} others:<> others:{}`, + out: &MyMessage{ + Count: Int32(42), + Others: []*OtherMessage{ + {}, + {}, + {}, + {}, + }, + }, + }, + + // Missing colon for inner message + { + in: `count:42 inner < host: "cauchy.syd" >`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("cauchy.syd"), + }, + }, + }, + + // Missing colon for string field + { + in: `name "Dave"`, + err: `line 1.5: expected ':', found "\"Dave\""`, + }, + + // Missing colon for int32 field + { + in: `count 42`, + err: `line 1.6: expected ':', found "42"`, + }, + + // Missing required field + { + in: ``, + err: `line 1.0: message testdata.MyMessage missing required field "count"`, + }, + + // Repeated non-repeated field + { + in: `name: "Rob" name: "Russ"`, + err: `line 1.12: non-repeated field "name" was repeated`, + }, + + // 
Group + { + in: `count: 17 SomeGroup { group_field: 12 }`, + out: &MyMessage{ + Count: Int32(17), + Somegroup: &MyMessage_SomeGroup{ + GroupField: Int32(12), + }, + }, + }, + + // Semicolon between fields + { + in: `count:3;name:"Calvin"`, + out: &MyMessage{ + Count: Int32(3), + Name: String("Calvin"), + }, + }, + // Comma between fields + { + in: `count:4,name:"Ezekiel"`, + out: &MyMessage{ + Count: Int32(4), + Name: String("Ezekiel"), + }, + }, + + // Extension + buildExtStructTest(`count: 42 [testdata.Ext.more]:`), + buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), + buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`), + buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), + + // Big all-in-one + { + in: "count:42 # Meaning\n" + + `name:"Dave" ` + + `quote:"\"I didn't want to go.\"" ` + + `pet:"bunny" ` + + `pet:"kitty" ` + + `pet:"horsey" ` + + `inner:<` + + ` host:"footrest.syd" ` + + ` port:7001 ` + + ` connected:true ` + + `> ` + + `others:<` + + ` key:3735928559 ` + + ` value:"\x01A\a\f" ` + + `> ` + + `others:<` + + " weight:58.9 # Atomic weight of Co\n" + + ` inner:<` + + ` host:"lesha.mtv" ` + + ` port:8002 ` + + ` >` + + `>`, + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + Quote: String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &InnerMessage{ + Host: String("footrest.syd"), + Port: Int32(7001), + Connected: Bool(true), + }, + Others: []*OtherMessage{ + { + Key: Int64(3735928559), + Value: []byte{0x1, 'A', '\a', '\f'}, + }, + { + Weight: Float32(58.9), + Inner: &InnerMessage{ + Host: String("lesha.mtv"), + Port: Int32(8002), + }, + }, + }, + }, + }, +} + +func TestUnmarshalText(t *testing.T) { + for i, test := range unMarshalTextTests { + pb := new(MyMessage) + err := UnmarshalText(test.in, pb) + if test.err == "" { + // We don't expect failure. + if err != nil { + t.Errorf("Test %d: Unexpected error: %v", i, err) + } else if !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } else { + // We do expect failure. + if err == nil { + t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) + } else if err.Error() != test.err { + t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", + i, err.Error(), test.err) + } + } + } +} + +func TestUnmarshalTextCustomMessage(t *testing.T) { + msg := &textMessage{} + if err := UnmarshalText("custom", msg); err != nil { + t.Errorf("Unexpected error from custom unmarshal: %v", err) + } + if UnmarshalText("not custom", msg) == nil { + t.Errorf("Didn't get expected error from custom unmarshal") + } +} + +// Regression test; this caused a panic. +func TestRepeatedEnum(t *testing.T) { + pb := new(RepeatedEnum) + if err := UnmarshalText("color: RED", pb); err != nil { + t.Fatal(err) + } + exp := &RepeatedEnum{ + Color: []RepeatedEnum_Color{RepeatedEnum_RED}, + } + if !Equal(pb, exp) { + t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp) + } +} + +var benchInput string + +func init() { + benchInput = "count: 4\n" + for i := 0; i < 1000; i++ { + benchInput += "pet: \"fido\"\n" + } + + // Check it is valid input. 
+ pb := new(MyMessage) + err := UnmarshalText(benchInput, pb) + if err != nil { + panic("Bad benchmark input: " + err.Error()) + } +} + +func BenchmarkUnmarshalText(b *testing.B) { + pb := new(MyMessage) + for i := 0; i < b.N; i++ { + UnmarshalText(benchInput, pb) + } + b.SetBytes(int64(len(benchInput))) +} diff --git a/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_test.go b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_test.go new file mode 100644 index 000000000..7a64e9e14 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto/text_test.go @@ -0,0 +1,408 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "errors" + "io/ioutil" + "math" + "strings" + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" + + pb "./testdata" +) + +// textMessage implements the methods that allow it to marshal and unmarshal +// itself as text. 
+type textMessage struct { +} + +func (*textMessage) MarshalText() ([]byte, error) { + return []byte("custom"), nil +} + +func (*textMessage) UnmarshalText(bytes []byte) error { + if string(bytes) != "custom" { + return errors.New("expected 'custom'") + } + return nil +} + +func (*textMessage) Reset() {} +func (*textMessage) String() string { return "" } +func (*textMessage) ProtoMessage() {} + +func newTestMessage() *pb.MyMessage { + msg := &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Quote: proto.String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("footrest.syd"), + Port: proto.Int32(7001), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(0xdeadbeef), + Value: []byte{1, 65, 7, 12}, + }, + { + Weight: proto.Float32(6.022), + Inner: &pb.InnerMessage{ + Host: proto.String("lesha.mtv"), + Port: proto.Int32(8002), + }, + }, + }, + Bikeshed: pb.MyMessage_BLUE.Enum(), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(8), + }, + // One normally wouldn't do this. + // This is an undeclared tag 13, as a varint (wire type 0) with value 4. + XXX_unrecognized: []byte{13<<3 | 0, 4}, + } + ext := &pb.Ext{ + Data: proto.String("Big gobs for big rats"), + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { + panic(err) + } + greetings := []string{"adg", "easy", "cow"} + if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { + panic(err) + } + + // Add an unknown extension. We marshal a pb.Ext, and fake the ID. + b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) + if err != nil { + panic(err) + } + b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) + proto.SetRawExtension(msg, 201, b) + + // Extensions can be plain fields, too, so let's test that. 
+ b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) + proto.SetRawExtension(msg, 202, b) + + return msg +} + +const text = `count: 42 +name: "Dave" +quote: "\"I didn't want to go.\"" +pet: "bunny" +pet: "kitty" +pet: "horsey" +inner: < + host: "footrest.syd" + port: 7001 + connected: true +> +others: < + key: 3735928559 + value: "\001A\007\014" +> +others: < + weight: 6.022 + inner: < + host: "lesha.mtv" + port: 8002 + > +> +bikeshed: BLUE +SomeGroup { + group_field: 8 +} +/* 2 unknown bytes */ +13: 4 +[testdata.Ext.more]: < + data: "Big gobs for big rats" +> +[testdata.greeting]: "adg" +[testdata.greeting]: "easy" +[testdata.greeting]: "cow" +/* 13 unknown bytes */ +201: "\t3G skiing" +/* 3 unknown bytes */ +202: 19 +` + +func TestMarshalText(t *testing.T) { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, newTestMessage()); err != nil { + t.Fatalf("proto.MarshalText: %v", err) + } + s := buf.String() + if s != text { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) + } +} + +func TestMarshalTextCustomMessage(t *testing.T) { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, &textMessage{}); err != nil { + t.Fatalf("proto.MarshalText: %v", err) + } + s := buf.String() + if s != "custom" { + t.Errorf("Got %q, expected %q", s, "custom") + } +} +func TestMarshalTextNil(t *testing.T) { + want := "" + tests := []proto.Message{nil, (*pb.MyMessage)(nil)} + for i, test := range tests { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, test); err != nil { + t.Fatal(err) + } + if got := buf.String(); got != want { + t.Errorf("%d: got %q want %q", i, got, want) + } + } +} + +func TestMarshalTextUnknownEnum(t *testing.T) { + // The Color enum only specifies values 0-2. + m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} + got := m.String() + const want = `bikeshed:3 ` + if got != want { + t.Errorf("\n got %q\nwant %q", got, want) + } +} + +func BenchmarkMarshalTextBuffered(b *testing.B) { + buf := new(bytes.Buffer) + m := newTestMessage() + for i := 0; i < b.N; i++ { + buf.Reset() + proto.MarshalText(buf, m) + } +} + +func BenchmarkMarshalTextUnbuffered(b *testing.B) { + w := ioutil.Discard + m := newTestMessage() + for i := 0; i < b.N; i++ { + proto.MarshalText(w, m) + } +} + +func compact(src string) string { + // s/[ \n]+/ /g; s/ $//; + dst := make([]byte, len(src)) + space, comment := false, false + j := 0 + for i := 0; i < len(src); i++ { + if strings.HasPrefix(src[i:], "/*") { + comment = true + i++ + continue + } + if comment && strings.HasPrefix(src[i:], "*/") { + comment = false + i++ + continue + } + if comment { + continue + } + c := src[i] + if c == ' ' || c == '\n' { + space = true + continue + } + if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { + space = false + } + if c == '{' { + space = false + } + if space { + dst[j] = ' ' + j++ + space = false + } + dst[j] = c + j++ + } + if space { + dst[j] = ' ' + j++ + } + return string(dst[0:j]) +} + +var compactText = compact(text) + +func TestCompactText(t *testing.T) { + s := proto.CompactTextString(newTestMessage()) + if s != compactText { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) + } +} + +func TestStringEscaping(t *testing.T) { + testCases := []struct { + in *pb.Strings + out string + }{ + { + // Test data from C++ test (TextFormatTest.StringEscape). + // Single divergence: we don't escape apostrophes. 
+ &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, + "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", + }, + { + // Test data from the same C++ test. + &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, + "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", + }, + { + // Some UTF-8. + &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, + `string_field: "\000\001\377\201"` + "\n", + }, + } + + for i, tc := range testCases { + var buf bytes.Buffer + if err := proto.MarshalText(&buf, tc.in); err != nil { + t.Errorf("proto.MarsalText: %v", err) + continue + } + s := buf.String() + if s != tc.out { + t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) + continue + } + + // Check round-trip. + pb := new(pb.Strings) + if err := proto.UnmarshalText(s, pb); err != nil { + t.Errorf("#%d: UnmarshalText: %v", i, err) + continue + } + if !proto.Equal(pb, tc.in) { + t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) + } + } +} + +// A limitedWriter accepts some output before it fails. +// This is a proxy for something like a nearly-full or imminently-failing disk, +// or a network connection that is about to die. +type limitedWriter struct { + b bytes.Buffer + limit int +} + +var outOfSpace = errors.New("proto: insufficient space") + +func (w *limitedWriter) Write(p []byte) (n int, err error) { + var avail = w.limit - w.b.Len() + if avail <= 0 { + return 0, outOfSpace + } + if len(p) <= avail { + return w.b.Write(p) + } + n, _ = w.b.Write(p[:avail]) + return n, outOfSpace +} + +func TestMarshalTextFailing(t *testing.T) { + // Try lots of different sizes to exercise more error code-paths. + for lim := 0; lim < len(text); lim++ { + buf := new(limitedWriter) + buf.limit = lim + err := proto.MarshalText(buf, newTestMessage()) + // We expect a certain error, but also some partial results in the buffer. + if err != outOfSpace { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace) + } + s := buf.b.String() + x := text[:buf.limit] + if s != x { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x) + } + } +} + +func TestFloats(t *testing.T) { + tests := []struct { + f float64 + want string + }{ + {0, "0"}, + {4.7, "4.7"}, + {math.Inf(1), "inf"}, + {math.Inf(-1), "-inf"}, + {math.NaN(), "nan"}, + } + for _, test := range tests { + msg := &pb.FloatingPoint{F: &test.f} + got := strings.TrimSpace(msg.String()) + want := `f:` + test.want + if got != want { + t.Errorf("f=%f: got %q, want %q", test.f, got, want) + } + } +} + +func TestRepeatedNilText(t *testing.T) { + m := &pb.MessageList{ + Message: []*pb.MessageList_Message{ + nil, + { + Name: proto.String("Horse"), + }, + nil, + }, + } + want := `Message +Message { + name: "Horse" +} +Message +` + if s := proto.MarshalTextString(m); s != want { + t.Errorf(" got: %s\nwant: %s", s, want) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/Makefile b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/Makefile new file mode 100644 index 000000000..e99b839a7 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/Makefile @@ -0,0 +1,40 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. 
+# http://code.google.com/p/goprotobuf/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install && cd testdata && make diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/all_test.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/all_test.go new file mode 100644 index 000000000..baac76aae --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/all_test.go @@ -0,0 +1,1979 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "math/rand" + "reflect" + "runtime/debug" + "strings" + "testing" + "time" + + . "./testdata" + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +) + +var globalO *Buffer + +func old() *Buffer { + if globalO == nil { + globalO = NewBuffer(nil) + } + globalO.Reset() + return globalO +} + +func equalbytes(b1, b2 []byte, t *testing.T) { + if len(b1) != len(b2) { + t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) + return + } + for i := 0; i < len(b1); i++ { + if b1[i] != b2[i] { + t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) + } + } +} + +func initGoTestField() *GoTestField { + f := new(GoTestField) + f.Label = String("label") + f.Type = String("type") + return f +} + +// These are all structurally equivalent but the tag numbers differ. +// (It's remarkable that required, optional, and repeated all have +// 8 letters.) +func initGoTest_RequiredGroup() *GoTest_RequiredGroup { + return &GoTest_RequiredGroup{ + RequiredField: String("required"), + } +} + +func initGoTest_OptionalGroup() *GoTest_OptionalGroup { + return &GoTest_OptionalGroup{ + RequiredField: String("optional"), + } +} + +func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { + return &GoTest_RepeatedGroup{ + RequiredField: String("repeated"), + } +} + +func initGoTest(setdefaults bool) *GoTest { + pb := new(GoTest) + if setdefaults { + pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) + pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) + pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) + pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) + pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) + pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) + pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) + pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) + pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) + pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) + pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted + pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) + pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) + } + + pb.Kind = GoTest_TIME.Enum() + pb.RequiredField = initGoTestField() + pb.F_BoolRequired = Bool(true) + pb.F_Int32Required = Int32(3) + pb.F_Int64Required = Int64(6) + pb.F_Fixed32Required = Uint32(32) + pb.F_Fixed64Required = Uint64(64) + pb.F_Uint32Required = Uint32(3232) + pb.F_Uint64Required = Uint64(6464) + pb.F_FloatRequired = Float32(3232) + pb.F_DoubleRequired = Float64(6464) + pb.F_StringRequired = String("string") + pb.F_BytesRequired = []byte("bytes") + pb.F_Sint32Required = Int32(-32) + pb.F_Sint64Required = Int64(-64) + pb.Requiredgroup = initGoTest_RequiredGroup() + + return pb +} + +func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { + data := b.Bytes() + ld 
:= len(data) + ls := len(s) / 2 + + fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) + + // find the interesting spot - n + n := ls + if ld < ls { + n = ld + } + j := 0 + for i := 0; i < n; i++ { + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + if data[i] == bs { + continue + } + n = i + break + } + l := n - 10 + if l < 0 { + l = 0 + } + h := n + 10 + + // find the interesting spot - n + fmt.Printf("is[%d]:", l) + for i := l; i < h; i++ { + if i >= ld { + fmt.Printf(" --") + continue + } + fmt.Printf(" %.2x", data[i]) + } + fmt.Printf("\n") + + fmt.Printf("sb[%d]:", l) + for i := l; i < h; i++ { + if i >= ls { + fmt.Printf(" --") + continue + } + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + fmt.Printf(" %.2x", bs) + } + fmt.Printf("\n") + + t.Fail() + + // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) + // Print the output in a partially-decoded format; can + // be helpful when updating the test. It produces the output + // that is pasted, with minor edits, into the argument to verify(). + // data := b.Bytes() + // nesting := 0 + // for b.Len() > 0 { + // start := len(data) - b.Len() + // var u uint64 + // u, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // wire := u & 0x7 + // tag := u >> 3 + // switch wire { + // case WireVarint: + // v, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed32: + // v, err := DecodeFixed32(b) + // if err != nil { + // fmt.Printf("decode error on fixed32:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed64: + // v, err := DecodeFixed64(b) + // if err != nil { + // fmt.Printf("decode error on fixed64:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireBytes: + // nb, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // after_tag := len(data) - b.Len() + // str := make([]byte, nb) + // _, err = b.Read(str) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", + // data[start:after_tag], str, tag, wire) + // case WireStartGroup: + // nesting++ + // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // case WireEndGroup: + // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // nesting-- + // default: + // fmt.Printf("unrecognized wire type %d\n", wire) + // return + // } + // } +} + +func hex(c uint8) uint8 { + if '0' <= c && c <= '9' { + return c - '0' + } + if 'a' <= c && c <= 'f' { + return 10 + c - 'a' + } + if 'A' <= c && c <= 'F' { + return 10 + c - 'A' + } + return 0 +} + +func equal(b []byte, s string, t *testing.T) bool { + if 2*len(b) != len(s) { + // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) + fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) + return false + } + for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { + x := hex(s[j])*16 + hex(s[j+1]) + if b[i] != x { + // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) + fmt.Printf("bad 
byte[%d]:%x %x", i, b[i], x) + return false + } + } + return true +} + +func overify(t *testing.T, pb *GoTest, expected string) { + o := old() + err := o.Marshal(pb) + if err != nil { + fmt.Printf("overify marshal-1 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 1", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. + pbd := new(GoTest) + err = o.Unmarshal(pbd) + if err != nil { + t.Fatalf("overify unmarshal err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + o.Reset() + err = o.Marshal(pbd) + if err != nil { + t.Errorf("overify marshal-2 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 2", o.Bytes()) + t.Fatalf("string = %s", expected) + } +} + +// Simple tests for numeric encode/decode primitives (varint, etc.) +func TestNumericPrimitives(t *testing.T) { + for i := uint64(0); i < 1e6; i += 111 { + o := old() + if o.EncodeVarint(i) != nil { + t.Error("EncodeVarint") + break + } + x, e := o.DecodeVarint() + if e != nil { + t.Fatal("DecodeVarint") + } + if x != i { + t.Fatal("varint decode fail:", i, x) + } + + o = old() + if o.EncodeFixed32(i) != nil { + t.Fatal("encFixed32") + } + x, e = o.DecodeFixed32() + if e != nil { + t.Fatal("decFixed32") + } + if x != i { + t.Fatal("fixed32 decode fail:", i, x) + } + + o = old() + if o.EncodeFixed64(i*1234567) != nil { + t.Error("encFixed64") + break + } + x, e = o.DecodeFixed64() + if e != nil { + t.Error("decFixed64") + break + } + if x != i*1234567 { + t.Error("fixed64 decode fail:", i*1234567, x) + break + } + + o = old() + i32 := int32(i - 12345) + if o.EncodeZigzag32(uint64(i32)) != nil { + t.Fatal("EncodeZigzag32") + } + x, e = o.DecodeZigzag32() + if e != nil { + t.Fatal("DecodeZigzag32") + } + if x != uint64(uint32(i32)) { + t.Fatal("zigzag32 decode fail:", i32, x) + } + + o = old() + i64 := int64(i - 12345) + if o.EncodeZigzag64(uint64(i64)) != nil { + t.Fatal("EncodeZigzag64") + } + x, e = o.DecodeZigzag64() + if e != nil { + t.Fatal("DecodeZigzag64") + } + if x != uint64(i64) { + t.Fatal("zigzag64 decode fail:", i64, x) + } + } +} + +// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. +type fakeMarshaler struct { + b []byte + err error +} + +func (f fakeMarshaler) Marshal() ([]byte, error) { + return f.b, f.err +} + +func (f fakeMarshaler) String() string { + return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) +} + +func (f fakeMarshaler) ProtoMessage() {} + +func (f fakeMarshaler) Reset() {} + +// Simple tests for proto messages that implement the Marshaler interface. +func TestMarshalerEncoding(t *testing.T) { + tests := []struct { + name string + m Message + want []byte + wantErr error + }{ + { + name: "Marshaler that fails", + m: fakeMarshaler{ + err: errors.New("some marshal err"), + b: []byte{5, 6, 7}, + }, + // Since there's an error, nothing should be written to buffer. 
+ want: nil, + wantErr: errors.New("some marshal err"), + }, + { + name: "Marshaler that succeeds", + m: fakeMarshaler{ + b: []byte{0, 1, 2, 3, 4, 127, 255}, + }, + want: []byte{0, 1, 2, 3, 4, 127, 255}, + wantErr: nil, + }, + } + for _, test := range tests { + b := NewBuffer(nil) + err := b.Marshal(test.m) + if !reflect.DeepEqual(test.wantErr, err) { + t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) + } + if !reflect.DeepEqual(test.want, b.Bytes()) { + t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) + } + } +} + +// Simple tests for bytes +func TestBytesPrimitives(t *testing.T) { + o := old() + bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} + if o.EncodeRawBytes(bytes) != nil { + t.Error("EncodeRawBytes") + } + decb, e := o.DecodeRawBytes(false) + if e != nil { + t.Error("DecodeRawBytes") + } + equalbytes(bytes, decb, t) +} + +// Simple tests for strings +func TestStringPrimitives(t *testing.T) { + o := old() + s := "now is the time" + if o.EncodeStringBytes(s) != nil { + t.Error("enc_string") + } + decs, e := o.DecodeStringBytes() + if e != nil { + t.Error("dec_string") + } + if s != decs { + t.Error("string encode/decode fail:", s, decs) + } +} + +// Do we catch the "required bit not set" case? +func TestRequiredBit(t *testing.T) { + o := old() + pb := new(GoTest) + err := o.Marshal(pb) + if err == nil { + t.Error("did not catch missing required fields") + } else if strings.Index(err.Error(), "Kind") < 0 { + t.Error("wrong error type:", err) + } +} + +// Check that all fields are nil. +// Clearly silly, and a residue from a more interesting test with an earlier, +// different initialization property, but it once caught a compiler bug so +// it lives. +func checkInitialized(pb *GoTest, t *testing.T) { + if pb.F_BoolDefaulted != nil { + t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) + } + if pb.F_Int32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) + } + if pb.F_Int64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) + } + if pb.F_Fixed32Defaulted != nil { + t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) + } + if pb.F_Fixed64Defaulted != nil { + t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) + } + if pb.F_Uint32Defaulted != nil { + t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) + } + if pb.F_Uint64Defaulted != nil { + t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) + } + if pb.F_FloatDefaulted != nil { + t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) + } + if pb.F_DoubleDefaulted != nil { + t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) + } + if pb.F_StringDefaulted != nil { + t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) + } + if pb.F_BytesDefaulted != nil { + t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) + } + if pb.F_Sint32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) + } + if pb.F_Sint64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) + } +} + +// Does Reset() reset? 
+func TestReset(t *testing.T) { + pb := initGoTest(true) + // muck with some values + pb.F_BoolDefaulted = Bool(false) + pb.F_Int32Defaulted = Int32(237) + pb.F_Int64Defaulted = Int64(12346) + pb.F_Fixed32Defaulted = Uint32(32000) + pb.F_Fixed64Defaulted = Uint64(666) + pb.F_Uint32Defaulted = Uint32(323232) + pb.F_Uint64Defaulted = nil + pb.F_FloatDefaulted = nil + pb.F_DoubleDefaulted = Float64(0) + pb.F_StringDefaulted = String("gotcha") + pb.F_BytesDefaulted = []byte("asdfasdf") + pb.F_Sint32Defaulted = Int32(123) + pb.F_Sint64Defaulted = Int64(789) + pb.Reset() + checkInitialized(pb, t) +} + +// All required fields set, no defaults provided. +func TestEncodeDecode1(t *testing.T) { + pb := initGoTest(false) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 0x20 + "714000000000000000"+ // field 14, encoding 1, value 0x40 + "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 + "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" + "b304"+ // field 70, encoding 3, start group + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // field 70, encoding 4, end group + "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f") // field 103, encoding 0, 0x7f zigzag64 +} + +// All required fields set, defaults provided. 
+func TestEncodeDecode2(t *testing.T) { + pb := initGoTest(true) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All default fields set to their default value by hand +func TestEncodeDecode3(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolDefaulted = Bool(true) + pb.F_Int32Defaulted = Int32(32) + pb.F_Int64Defaulted = Int64(64) + pb.F_Fixed32Defaulted = Uint32(320) + pb.F_Fixed64Defaulted = Uint64(640) + pb.F_Uint32Defaulted = Uint32(3200) + pb.F_Uint64Defaulted = Uint64(6400) + pb.F_FloatDefaulted = Float32(314159) + pb.F_DoubleDefaulted = Float64(271828) + pb.F_StringDefaulted = String("hello, \"world!\"\n") + pb.F_BytesDefaulted = []byte("Bignose") + pb.F_Sint32Defaulted = Int32(-32) + pb.F_Sint64Defaulted = Int64(-64) + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all non-defaulted optional fields have values. +func TestEncodeDecode4(t *testing.T) { + pb := initGoTest(true) + pb.Table = String("hello") + pb.Param = Int32(7) + pb.OptionalField = initGoTestField() + pb.F_BoolOptional = Bool(true) + pb.F_Int32Optional = Int32(32) + pb.F_Int64Optional = Int64(64) + pb.F_Fixed32Optional = Uint32(3232) + pb.F_Fixed64Optional = Uint64(6464) + pb.F_Uint32Optional = Uint32(323232) + pb.F_Uint64Optional = Uint64(646464) + pb.F_FloatOptional = Float32(32.) + pb.F_DoubleOptional = Float64(64.) + pb.F_StringOptional = String("hello") + pb.F_BytesOptional = []byte("Bignose") + pb.F_Sint32Optional = Int32(-32) + pb.F_Sint64Optional = Int64(-64) + pb.Optionalgroup = initGoTest_OptionalGroup() + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" + "1807"+ // field 3, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "f00101"+ // field 30, encoding 0, value 1 + "f80120"+ // field 31, encoding 0, value 32 + "800240"+ // field 32, encoding 0, value 64 + "8d02a00c0000"+ // field 33, encoding 5, value 3232 + "91024019000000000000"+ // field 34, encoding 1, value 6464 + "9802a0dd13"+ // field 35, encoding 0, value 323232 + "a002c0ba27"+ // field 36, encoding 0, value 646464 + "ad0200000042"+ // field 37, encoding 5, value 32.0 + "b1020000000000005040"+ // field 38, encoding 1, value 64.0 + "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "d305"+ // start group field 90 level 1 + 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" + "d405"+ // end group field 90 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" + "f0123f"+ // field 302, encoding 0, value 63 + "f8127f"+ // field 303, encoding 0, value 127 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestEncodeDecode5(t *testing.T) { + pb := initGoTest(true) + pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} + pb.F_BoolRepeated = []bool{false, true} + pb.F_Int32Repeated = []int32{32, 33} + pb.F_Int64Repeated = []int64{64, 65} + pb.F_Fixed32Repeated = []uint32{3232, 3333} + pb.F_Fixed64Repeated = []uint64{6464, 6565} + pb.F_Uint32Repeated = []uint32{323232, 333333} + pb.F_Uint64Repeated = []uint64{646464, 656565} + pb.F_FloatRepeated = []float32{32., 33.} + pb.F_DoubleRepeated = []float64{64., 65.} + pb.F_StringRepeated = []string{"hello", "sailor"} + pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} + pb.F_Sint32Repeated = []int32{32, -32} + pb.F_Sint64Repeated = []int64{64, -64} + pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "a00100"+ // field 20, encoding 0, value 0 + "a00101"+ // field 20, encoding 0, value 1 + "a80120"+ // field 21, encoding 0, value 32 + "a80121"+ // field 21, encoding 0, value 33 + "b00140"+ // field 22, encoding 0, value 64 + "b00141"+ // field 22, encoding 0, value 65 + "bd01a00c0000"+ // field 23, encoding 5, value 3232 + "bd01050d0000"+ // field 23, encoding 5, value 3333 + "c1014019000000000000"+ // field 24, encoding 1, value 6464 + "c101a519000000000000"+ // field 24, encoding 1, value 6565 + "c801a0dd13"+ // field 25, encoding 0, value 323232 + "c80195ac14"+ // field 25, encoding 0, value 333333 + "d001c0ba27"+ // field 26, encoding 0, value 646464 + "d001b58928"+ // field 26, encoding 0, value 656565 + "dd0100000042"+ // field 27, encoding 5, value 32.0 + "dd0100000442"+ // field 27, encoding 5, value 33.0 + "e1010000000000005040"+ // field 28, encoding 1, value 64.0 + "e1010000000000405040"+ // field 28, encoding 1, value 65.0 + "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" + "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 
42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ca0c03"+"626967"+ // field 201, encoding 2, string "big" + "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" + "d00c40"+ // field 202, encoding 0, value 32 + "d00c3f"+ // field 202, encoding 0, value -32 + "d80c8001"+ // field 203, encoding 0, value 64 + "d80c7f"+ // field 203, encoding 0, value -64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, all packed repeated fields given two values. +func TestEncodeDecode6(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolRepeatedPacked = []bool{false, true} + pb.F_Int32RepeatedPacked = []int32{32, 33} + pb.F_Int64RepeatedPacked = []int64{64, 65} + pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} + pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} + pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} + pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} + pb.F_FloatRepeatedPacked = []float32{32., 33.} + pb.F_DoubleRepeatedPacked = []float64{64., 65.} + pb.F_Sint32RepeatedPacked = []int32{32, -32} + pb.F_Sint64RepeatedPacked = []int64{64, -64} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 + "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 + "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 + "aa0308"+ // field 53, encoding 2, 8 bytes + "a00c0000050d0000"+ // value 3232, value 3333 + "b20310"+ // field 54, encoding 2, 16 bytes + "4019000000000000a519000000000000"+ // value 6464, value 6565 + "ba0306"+ // field 55, encoding 2, 6 bytes + "a0dd1395ac14"+ // value 323232, value 333333 + "c20306"+ // field 56, encoding 2, 6 bytes + "c0ba27b58928"+ // value 646464, value 656565 + "ca0308"+ // field 57, 
encoding 2, 8 bytes + "0000004200000442"+ // value 32.0, value 33.0 + "d20310"+ // field 58, encoding 2, 16 bytes + "00000000000050400000000000405040"+ // value 64.0, value 65.0 + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "b21f02"+ // field 502, encoding 2, 2 bytes + "403f"+ // value 32, value -32 + "ba1f03"+ // field 503, encoding 2, 3 bytes + "80017f") // value 64, value -64 +} + +// Test that we can encode empty bytes fields. +func TestEncodeDecodeBytes1(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRequired = []byte{} + pb.F_BytesRepeated = [][]byte{{}} + pb.F_BytesOptional = []byte{} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { + t.Error("required empty bytes field is incorrect") + } + if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { + t.Error("repeated empty bytes field is incorrect") + } + if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { + t.Error("optional empty bytes field is incorrect") + } +} + +// Test that we encode nil-valued fields of a repeated bytes field correctly. +// Since entries in a repeated field cannot be nil, nil must mean empty value. +func TestEncodeDecodeBytes2(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRepeated = [][]byte{nil} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { + t.Error("Unexpected value for repeated bytes field") + } +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestSkippingUnrecognizedFields(t *testing.T) { + o := old() + pb := initGoTestField() + + // Marshal it normally. + o.Marshal(pb) + + // Now new a GoSkipTest record. + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + // Marshal it into same buffer. + o.Marshal(skip) + + pbd := new(GoTestField) + o.Unmarshal(pbd) + + // The __unrecognized field should be a marshaling of GoSkipTest + skipd := new(GoSkipTest) + + o.SetBuf(pbd.XXX_unrecognized) + o.Unmarshal(skipd) + + if *skipd.SkipInt32 != *skip.SkipInt32 { + t.Error("skip int32", skipd.SkipInt32) + } + if *skipd.SkipFixed32 != *skip.SkipFixed32 { + t.Error("skip fixed32", skipd.SkipFixed32) + } + if *skipd.SkipFixed64 != *skip.SkipFixed64 { + t.Error("skip fixed64", skipd.SkipFixed64) + } + if *skipd.SkipString != *skip.SkipString { + t.Error("skip string", *skipd.SkipString) + } + if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { + t.Error("skip group int32", skipd.Skipgroup.GroupInt32) + } + if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { + t.Error("skip group string", *skipd.Skipgroup.GroupString) + } +} + +// Check that unrecognized fields of a submessage are preserved. 
+func TestSubmessageUnrecognizedFields(t *testing.T) { + nm := &NewMessage{ + Nested: &NewMessage_Nested{ + Name: String("Nigel"), + FoodGroup: String("carbs"), + }, + } + b, err := Marshal(nm) + if err != nil { + t.Fatalf("Marshal of NewMessage: %v", err) + } + + // Unmarshal into an OldMessage. + om := new(OldMessage) + if err := Unmarshal(b, om); err != nil { + t.Fatalf("Unmarshal to OldMessage: %v", err) + } + exp := &OldMessage{ + Nested: &OldMessage_Nested{ + Name: String("Nigel"), + // normal protocol buffer users should not do this + XXX_unrecognized: []byte("\x12\x05carbs"), + }, + } + if !Equal(om, exp) { + t.Errorf("om = %v, want %v", om, exp) + } + + // Clone the OldMessage. + om = Clone(om).(*OldMessage) + if !Equal(om, exp) { + t.Errorf("Clone(om) = %v, want %v", om, exp) + } + + // Marshal the OldMessage, then unmarshal it into an empty NewMessage. + if b, err = Marshal(om); err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + t.Logf("Marshal(%v) -> %q", om, b) + nm2 := new(NewMessage) + if err := Unmarshal(b, nm2); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + if !Equal(nm, nm2) { + t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) + } +} + +// Check that an int32 field can be upgraded to an int64 field. +func TestNegativeInt32(t *testing.T) { + om := &OldMessage{ + Num: Int32(-1), + } + b, err := Marshal(om) + if err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + + // Check the size. It should be 11 bytes; + // 1 for the field/wire type, and 10 for the negative number. + if len(b) != 11 { + t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) + } + + // Unmarshal into a NewMessage. + nm := new(NewMessage) + if err := Unmarshal(b, nm); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + want := &NewMessage{ + Num: Int64(-1), + } + if !Equal(nm, want) { + t.Errorf("nm = %v, want %v", nm, want) + } +} + +// Check that we can grow an array (repeated field) to have many elements. +// This test doesn't depend only on our encoding; for variety, it makes sure +// we create, encode, and decode the correct contents explicitly. It's therefore +// a bit messier. +// This test also uses (and hence tests) the Marshal/Unmarshal functions +// instead of the methods. +func TestBigRepeated(t *testing.T) { + pb := initGoTest(true) + + // Create the arrays + const N = 50 // Internally the library starts much smaller. + pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) + pb.F_Sint64Repeated = make([]int64, N) + pb.F_Sint32Repeated = make([]int32, N) + pb.F_BytesRepeated = make([][]byte, N) + pb.F_StringRepeated = make([]string, N) + pb.F_DoubleRepeated = make([]float64, N) + pb.F_FloatRepeated = make([]float32, N) + pb.F_Uint64Repeated = make([]uint64, N) + pb.F_Uint32Repeated = make([]uint32, N) + pb.F_Fixed64Repeated = make([]uint64, N) + pb.F_Fixed32Repeated = make([]uint32, N) + pb.F_Int64Repeated = make([]int64, N) + pb.F_Int32Repeated = make([]int32, N) + pb.F_BoolRepeated = make([]bool, N) + pb.RepeatedField = make([]*GoTestField, N) + + // Fill in the arrays with checkable values. 
+ igtf := initGoTestField() + igtrg := initGoTest_RepeatedGroup() + for i := 0; i < N; i++ { + pb.Repeatedgroup[i] = igtrg + pb.F_Sint64Repeated[i] = int64(i) + pb.F_Sint32Repeated[i] = int32(i) + s := fmt.Sprint(i) + pb.F_BytesRepeated[i] = []byte(s) + pb.F_StringRepeated[i] = s + pb.F_DoubleRepeated[i] = float64(i) + pb.F_FloatRepeated[i] = float32(i) + pb.F_Uint64Repeated[i] = uint64(i) + pb.F_Uint32Repeated[i] = uint32(i) + pb.F_Fixed64Repeated[i] = uint64(i) + pb.F_Fixed32Repeated[i] = uint32(i) + pb.F_Int64Repeated[i] = int64(i) + pb.F_Int32Repeated[i] = int32(i) + pb.F_BoolRepeated[i] = i%2 == 0 + pb.RepeatedField[i] = igtf + } + + // Marshal. + buf, _ := Marshal(pb) + + // Now test Unmarshal by recreating the original buffer. + pbd := new(GoTest) + Unmarshal(buf, pbd) + + // Check the checkable values + for i := uint64(0); i < N; i++ { + if pbd.Repeatedgroup[i] == nil { // TODO: more checking? + t.Error("pbd.Repeatedgroup bad") + } + var x uint64 + x = uint64(pbd.F_Sint64Repeated[i]) + if x != i { + t.Error("pbd.F_Sint64Repeated bad", x, i) + } + x = uint64(pbd.F_Sint32Repeated[i]) + if x != i { + t.Error("pbd.F_Sint32Repeated bad", x, i) + } + s := fmt.Sprint(i) + equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) + if pbd.F_StringRepeated[i] != s { + t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) + } + x = uint64(pbd.F_DoubleRepeated[i]) + if x != i { + t.Error("pbd.F_DoubleRepeated bad", x, i) + } + x = uint64(pbd.F_FloatRepeated[i]) + if x != i { + t.Error("pbd.F_FloatRepeated bad", x, i) + } + x = pbd.F_Uint64Repeated[i] + if x != i { + t.Error("pbd.F_Uint64Repeated bad", x, i) + } + x = uint64(pbd.F_Uint32Repeated[i]) + if x != i { + t.Error("pbd.F_Uint32Repeated bad", x, i) + } + x = pbd.F_Fixed64Repeated[i] + if x != i { + t.Error("pbd.F_Fixed64Repeated bad", x, i) + } + x = uint64(pbd.F_Fixed32Repeated[i]) + if x != i { + t.Error("pbd.F_Fixed32Repeated bad", x, i) + } + x = uint64(pbd.F_Int64Repeated[i]) + if x != i { + t.Error("pbd.F_Int64Repeated bad", x, i) + } + x = uint64(pbd.F_Int32Repeated[i]) + if x != i { + t.Error("pbd.F_Int32Repeated bad", x, i) + } + if pbd.F_BoolRepeated[i] != (i%2 == 0) { + t.Error("pbd.F_BoolRepeated bad", x, i) + } + if pbd.RepeatedField[i] == nil { // TODO: more checking? + t.Error("pbd.RepeatedField bad") + } + } +} + +// Verify we give a useful message when decoding to the wrong structure type. +func TestTypeMismatch(t *testing.T) { + pb1 := initGoTest(true) + + // Marshal + o := old() + o.Marshal(pb1) + + // Now Unmarshal it to the wrong type. 
+ pb2 := initGoTestField() + err := o.Unmarshal(pb2) + if err == nil { + t.Error("expected error, got no error") + } else if !strings.Contains(err.Error(), "bad wiretype") { + t.Error("expected bad wiretype error, got", err) + } +} + +func encodeDecode(t *testing.T, in, out Message, msg string) { + buf, err := Marshal(in) + if err != nil { + t.Fatalf("failed marshaling %v: %v", msg, err) + } + if err := Unmarshal(buf, out); err != nil { + t.Fatalf("failed unmarshaling %v: %v", msg, err) + } +} + +func TestPackedNonPackedDecoderSwitching(t *testing.T) { + np, p := new(NonPackedTest), new(PackedTest) + + // non-packed -> packed + np.A = []int32{0, 1, 1, 2, 3, 5} + encodeDecode(t, np, p, "non-packed -> packed") + if !reflect.DeepEqual(np.A, p.B) { + t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) + } + + // packed -> non-packed + np.Reset() + p.B = []int32{3, 1, 4, 1, 5, 9} + encodeDecode(t, p, np, "packed -> non-packed") + if !reflect.DeepEqual(p.B, np.A) { + t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) + } +} + +func TestProto1RepeatedGroup(t *testing.T) { + pb := &MessageList{ + Message: []*MessageList_Message{ + { + Name: String("blah"), + Count: Int32(7), + }, + // NOTE: pb.Message[1] is a nil + nil, + }, + } + + o := old() + if err := o.Marshal(pb); err != ErrRepeatedHasNil { + t.Fatalf("unexpected or no error when marshaling: %v", err) + } +} + +// Test that enums work. Checks for a bug introduced by making enums +// named types instead of int32: newInt32FromUint64 would crash with +// a type mismatch in reflect.PointTo. +func TestEnum(t *testing.T) { + pb := new(GoEnum) + pb.Foo = FOO_FOO1.Enum() + o := old() + if err := o.Marshal(pb); err != nil { + t.Fatal("error encoding enum:", err) + } + pb1 := new(GoEnum) + if err := o.Unmarshal(pb1); err != nil { + t.Fatal("error decoding enum:", err) + } + if *pb1.Foo != FOO_FOO1 { + t.Error("expected 7 but got ", *pb1.Foo) + } +} + +// Enum types have String methods. Check that enum fields can be printed. +// We don't care what the value actually is, just as long as it doesn't crash. +func TestPrintingNilEnumFields(t *testing.T) { + pb := new(GoEnum) + fmt.Sprintf("%+v", pb) +} + +// Verify that absent required fields cause Marshal/Unmarshal to return errors. +func TestRequiredFieldEnforcement(t *testing.T) { + pb := new(GoTestField) + _, err := Marshal(pb) + if err == nil { + t.Error("marshal: expected error, got nil") + } else if strings.Index(err.Error(), "Label") < 0 { + t.Errorf("marshal: bad error type: %v", err) + } + + // A slightly sneaky, yet valid, proto. It encodes the same required field twice, + // so simply counting the required fields is insufficient. + // field 1, encoding 2, value "hi" + buf := []byte("\x0A\x02hi\x0A\x02hi") + err = Unmarshal(buf, pb) + if err == nil { + t.Error("unmarshal: expected error, got nil") + } else if strings.Index(err.Error(), "{Unknown}") < 0 { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +func TestTypedNilMarshal(t *testing.T) { + // A typed nil should return ErrNil and not crash. + _, err := Marshal((*GoEnum)(nil)) + if err != ErrNil { + t.Errorf("Marshal: got err %v, want ErrNil", err) + } +} + +// A type that implements the Marshaler interface, but is not nillable. 
+type nonNillableInt uint64 + +func (nni nonNillableInt) Marshal() ([]byte, error) { + return EncodeVarint(uint64(nni)), nil +} + +type NNIMessage struct { + nni nonNillableInt +} + +func (*NNIMessage) Reset() {} +func (*NNIMessage) String() string { return "" } +func (*NNIMessage) ProtoMessage() {} + +// A type that implements the Marshaler interface and is nillable. +type nillableMessage struct { + x uint64 +} + +func (nm *nillableMessage) Marshal() ([]byte, error) { + return EncodeVarint(nm.x), nil +} + +type NMMessage struct { + nm *nillableMessage +} + +func (*NMMessage) Reset() {} +func (*NMMessage) String() string { return "" } +func (*NMMessage) ProtoMessage() {} + +// Verify a type that uses the Marshaler interface, but has a nil pointer. +func TestNilMarshaler(t *testing.T) { + // Try a struct with a Marshaler field that is nil. + // It should be directly marshable. + nmm := new(NMMessage) + if _, err := Marshal(nmm); err != nil { + t.Error("unexpected error marshaling nmm: ", err) + } + + // Try a struct with a Marshaler field that is not nillable. + nnim := new(NNIMessage) + nnim.nni = 7 + var _ Marshaler = nnim.nni // verify it is truly a Marshaler + if _, err := Marshal(nnim); err != nil { + t.Error("unexpected error marshaling nnim: ", err) + } +} + +func TestAllSetDefaults(t *testing.T) { + // Exercise SetDefaults with all scalar field types. + m := &Defaults{ + // NaN != NaN, so override that here. + F_Nan: Float32(1.7), + } + expected := &Defaults{ + F_Bool: Bool(true), + F_Int32: Int32(32), + F_Int64: Int64(64), + F_Fixed32: Uint32(320), + F_Fixed64: Uint64(640), + F_Uint32: Uint32(3200), + F_Uint64: Uint64(6400), + F_Float: Float32(314159), + F_Double: Float64(271828), + F_String: String(`hello, "world!"` + "\n"), + F_Bytes: []byte("Bignose"), + F_Sint32: Int32(-32), + F_Sint64: Int64(-64), + F_Enum: Defaults_GREEN.Enum(), + F_Pinf: Float32(float32(math.Inf(1))), + F_Ninf: Float32(float32(math.Inf(-1))), + F_Nan: Float32(1.7), + StrZero: String(""), + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithSetField(t *testing.T) { + // Check that a set value is not overridden. 
+ m := &Defaults{ + F_Int32: Int32(12), + } + SetDefaults(m) + if v := m.GetF_Int32(); v != 12 { + t.Errorf("m.FInt32 = %v, want 12", v) + } +} + +func TestSetDefaultsWithSubMessage(t *testing.T) { + m := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + }, + } + expected := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + Port: Int32(4000), + }, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { + m := &MyMessage{ + RepInner: []*InnerMessage{{}}, + } + expected := &MyMessage{ + RepInner: []*InnerMessage{{ + Port: Int32(4000), + }}, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestMaximumTagNumber(t *testing.T) { + m := &MaxTag{ + LastField: String("natural goat essence"), + } + buf, err := Marshal(m) + if err != nil { + t.Fatalf("proto.Marshal failed: %v", err) + } + m2 := new(MaxTag) + if err := Unmarshal(buf, m2); err != nil { + t.Fatalf("proto.Unmarshal failed: %v", err) + } + if got, want := m2.GetLastField(), *m.LastField; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestJSON(t *testing.T) { + m := &MyMessage{ + Count: Int32(4), + Pet: []string{"bunny", "kitty"}, + Inner: &InnerMessage{ + Host: String("cauchy"), + }, + Bikeshed: MyMessage_GREEN.Enum(), + } + const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` + + b, err := json.Marshal(m) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + s := string(b) + if s != expected { + t.Errorf("got %s\nwant %s", s, expected) + } + + received := new(MyMessage) + if err := json.Unmarshal(b, received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } + + // Test unmarshalling of JSON with symbolic enum name. + const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` + received.Reset() + if err := json.Unmarshal([]byte(old), received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } +} + +func TestBadWireType(t *testing.T) { + b := []byte{7<<3 | 6} // field 7, wire type 6 + pb := new(OtherMessage) + if err := Unmarshal(b, pb); err == nil { + t.Errorf("Unmarshal did not fail") + } else if !strings.Contains(err.Error(), "unknown wire type") { + t.Errorf("wrong error: %v", err) + } +} + +func TestBytesWithInvalidLength(t *testing.T) { + // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. + b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} + Unmarshal(b, new(MyMessage)) +} + +func TestLengthOverflow(t *testing.T) { + // Overflowing a length should not panic. + b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} + Unmarshal(b, new(MyMessage)) +} + +func TestVarintOverflow(t *testing.T) { + // Overflowing a 64-bit length should not be allowed. 
+ b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} + if err := Unmarshal(b, new(MyMessage)); err == nil { + t.Fatalf("Overflowed uint64 length without error") + } +} + +func TestUnmarshalFuzz(t *testing.T) { + const N = 1000 + seed := time.Now().UnixNano() + t.Logf("RNG seed is %d", seed) + rng := rand.New(rand.NewSource(seed)) + buf := make([]byte, 20) + for i := 0; i < N; i++ { + for j := range buf { + buf[j] = byte(rng.Intn(256)) + } + fuzzUnmarshal(t, buf) + } +} + +func TestMergeMessages(t *testing.T) { + pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} + data, err := Marshal(pb) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + pb1 := new(MessageList) + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("first Unmarshal: %v", err) + } + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("second Unmarshal: %v", err) + } + if len(pb1.Message) != 1 { + t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) + } + + pb2 := new(MessageList) + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("first UnmarshalMerge: %v", err) + } + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("second UnmarshalMerge: %v", err) + } + if len(pb2.Message) != 2 { + t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) + } +} + +func TestExtensionMarshalOrder(t *testing.T) { + m := &MyMessage{Count: Int(123)} + if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { + t.Fatalf("SetExtension: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. + var orig []byte + for i := 0; i < 100; i++ { + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if i == 0 { + orig = b + continue + } + if !bytes.Equal(b, orig) { + t.Errorf("Bytes differ on attempt #%d", i) + } + } +} + +// Many extensions, because small maps might not iterate differently on each iteration. +var exts = []*ExtensionDesc{ + E_X201, + E_X202, + E_X203, + E_X204, + E_X205, + E_X206, + E_X207, + E_X208, + E_X209, + E_X210, + E_X211, + E_X212, + E_X213, + E_X214, + E_X215, + E_X216, + E_X217, + E_X218, + E_X219, + E_X220, + E_X221, + E_X222, + E_X223, + E_X224, + E_X225, + E_X226, + E_X227, + E_X228, + E_X229, + E_X230, + E_X231, + E_X232, + E_X233, + E_X234, + E_X235, + E_X236, + E_X237, + E_X238, + E_X239, + E_X240, + E_X241, + E_X242, + E_X243, + E_X244, + E_X245, + E_X246, + E_X247, + E_X248, + E_X249, + E_X250, +} + +func TestMessageSetMarshalOrder(t *testing.T) { + m := &MyMessageSet{} + for _, x := range exts { + if err := SetExtension(m, x, &Empty{}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + } + + buf, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. 
+ for i := 0; i < 10; i++ { + b1, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(b1, buf) { + t.Errorf("Bytes differ on re-Marshal #%d", i) + } + + m2 := &MyMessageSet{} + if err := Unmarshal(buf, m2); err != nil { + t.Errorf("Unmarshal: %v", err) + } + b2, err := Marshal(m2) + if err != nil { + t.Errorf("re-Marshal: %v", err) + } + if !bytes.Equal(b2, buf) { + t.Errorf("Bytes differ on round-trip #%d", i) + } + } +} + +func TestUnmarshalMergesMessages(t *testing.T) { + // If a nested message occurs twice in the input, + // the fields should be merged when decoding. + a := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("polhode"), + Port: Int32(1234), + }, + } + aData, err := Marshal(a) + if err != nil { + t.Fatalf("Marshal(a): %v", err) + } + b := &OtherMessage{ + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Connected: Bool(true), + }, + } + bData, err := Marshal(b) + if err != nil { + t.Fatalf("Marshal(b): %v", err) + } + want := &OtherMessage{ + Key: Int64(123), + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Port: Int32(1234), + Connected: Bool(true), + }, + } + got := new(OtherMessage) + if err := Unmarshal(append(aData, bData...), got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !Equal(got, want) { + t.Errorf("\n got %v\nwant %v", got, want) + } +} + +func TestEncodingSizes(t *testing.T) { + tests := []struct { + m Message + n int + }{ + {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, + {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, + {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, + {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, + } + for _, test := range tests { + b, err := Marshal(test.m) + if err != nil { + t.Errorf("Marshal(%v): %v", test.m, err) + continue + } + if len(b) != test.n { + t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) + } + } +} + +func TestRequiredNotSetError(t *testing.T) { + pb := initGoTest(false) + pb.RequiredField.Label = nil + pb.F_Int32Required = nil + pb.F_Int64Required = nil + + expected := "0807" + // field 1, encoding 0, value 7 + "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) + "5001" + // field 10, encoding 0, value 1 + "6d20000000" + // field 13, encoding 5, value 0x20 + "714000000000000000" + // field 14, encoding 1, value 0x40 + "78a019" + // field 15, encoding 0, value 0xca0 = 3232 + "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45" + // field 17, encoding 5, value 3232.0 + "9101000000000040b940" + // field 18, encoding 1, value 6464.0 + "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" + "b304" + // field 70, encoding 3, start group + "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" + "b404" + // field 70, encoding 4, end group + "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" + "b0063f" + // field 102, encoding 0, 0x3f zigzag32 + "b8067f" // field 103, encoding 0, 0x7f zigzag64 + + o := old() + bytes, err := Marshal(pb) + if _, ok := err.(*RequiredNotSetError); !ok { + fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("expected = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-1 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 1", bytes) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by 
recreating the original buffer. + pbd := new(GoTest) + err = Unmarshal(bytes, pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { + t.Errorf("unmarshal wrong err msg: %v", err) + } + bytes, err = Marshal(pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-2 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 2", bytes) + t.Fatalf("string = %s", expected) + } +} + +func fuzzUnmarshal(t *testing.T, data []byte) { + defer func() { + if e := recover(); e != nil { + t.Errorf("These bytes caused a panic: %+v", data) + t.Logf("Stack:\n%s", debug.Stack()) + t.FailNow() + } + }() + + pb := new(MyMessage) + Unmarshal(data, pb) +} + +// Benchmarks + +func testMsg() *GoTest { + pb := initGoTest(true) + const N = 1000 // Internally the library starts much smaller. + pb.F_Int32Repeated = make([]int32, N) + pb.F_DoubleRepeated = make([]float64, N) + for i := 0; i < N; i++ { + pb.F_Int32Repeated[i] = int32(i) + pb.F_DoubleRepeated[i] = float64(i) + } + return pb +} + +func bytesMsg() *GoTest { + pb := initGoTest(true) + buf := make([]byte, 4000) + for i := range buf { + buf[i] = byte(i) + } + pb.F_BytesDefaulted = buf + return pb +} + +func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { + d, _ := marshal(pb) + b.SetBytes(int64(len(d))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + marshal(pb) + } +} + +func benchmarkBufferMarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + p.Reset() + err := p.Marshal(pb0) + return p.Bytes(), err + }) +} + +func benchmarkSize(b *testing.B, pb Message) { + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + Size(pb) + return nil, nil + }) +} + +func newOf(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + return reflect.New(in.Type().Elem()).Interface().(Message) +} + +func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { + d, _ := Marshal(pb) + b.SetBytes(int64(len(d))) + pbd := newOf(pb) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + unmarshal(d, pbd) + } +} + +func benchmarkBufferUnmarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { + p.SetBuf(d) + return p.Unmarshal(pb0) + }) +} + +// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} + +func BenchmarkMarshal(b *testing.B) { + benchmarkMarshal(b, testMsg(), Marshal) +} + +func BenchmarkBufferMarshal(b *testing.B) { + benchmarkBufferMarshal(b, testMsg()) +} + +func BenchmarkSize(b *testing.B) { + benchmarkSize(b, testMsg()) +} + +func BenchmarkUnmarshal(b *testing.B) { + benchmarkUnmarshal(b, testMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshal(b *testing.B) { + benchmarkBufferUnmarshal(b, testMsg()) +} + +func BenchmarkMarshalBytes(b *testing.B) { + benchmarkMarshal(b, bytesMsg(), Marshal) +} + +func BenchmarkBufferMarshalBytes(b *testing.B) { + benchmarkBufferMarshal(b, bytesMsg()) +} + +func BenchmarkSizeBytes(b *testing.B) { + benchmarkSize(b, bytesMsg()) +} + +func BenchmarkUnmarshalBytes(b 
*testing.B) { + benchmarkUnmarshal(b, bytesMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshalBytes(b *testing.B) { + benchmarkBufferUnmarshal(b, bytesMsg()) +} + +func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { + b.StopTimer() + pb := initGoTestField() + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + pbd := new(GoTestField) + p := NewBuffer(nil) + p.Marshal(pb) + p.Marshal(skip) + p2 := NewBuffer(nil) + + b.StartTimer() + for i := 0; i < b.N; i++ { + p2.SetBuf(p.Bytes()) + p2.Unmarshal(pbd) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone.go new file mode 100644 index 000000000..8b94d9e18 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone.go @@ -0,0 +1,169 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy. +// TODO: MessageSet and RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
+func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i)) + } + + if emIn, ok := in.Addr().Interface().(extendableProto); ok { + emOut := out.Addr().Interface().(extendableProto) + mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +func mergeAny(out, in reflect.Value) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(in) + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem()) + case reflect.Slice: + if in.IsNil() { + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + case reflect.Uint8: + // []byte is a scalar bytes field. + out.Set(in) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i)) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value)) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone_test.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone_test.go new file mode 100644 index 000000000..81a6b8ced --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone_test.go @@ -0,0 +1,186 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + + pb "./testdata" +) + +var cloneTestMessage = &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, +} + +func init() { + ext := &pb.Ext{ + Data: proto.String("extension"), + } + if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { + panic("SetExtension: " + err.Error()) + } +} + +func TestClone(t *testing.T) { + m := proto.Clone(cloneTestMessage).(*pb.MyMessage) + if !proto.Equal(m, cloneTestMessage) { + t.Errorf("Clone(%v) = %v", cloneTestMessage, m) + } + + // Verify it was a deep copy. 
+ *m.Inner.Port++ + if proto.Equal(m, cloneTestMessage) { + t.Error("Mutating clone changed the original") + } +} + +func TestCloneNil(t *testing.T) { + var m *pb.MyMessage + if c := proto.Clone(m); !proto.Equal(m, c) { + t.Errorf("Clone(%v) = %v", m, c) + } +} + +var mergeTests = []struct { + src, dst, want proto.Message +}{ + { + src: &pb.MyMessage{ + Count: proto.Int32(42), + }, + dst: &pb.MyMessage{ + Name: proto.String("Dave"), + }, + want: &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + }, + }, + { + src: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + }, + Pet: []string{"horsey"}, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + }, + dst: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + { + // Explicitly test a src=nil field + Inner: nil, + }, + }, + }, + want: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty", "horsey"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + {}, + { + Value: []byte("some bytes"), + }, + }, + }, + }, + { + src: &pb.MyMessage{ + RepBytes: [][]byte{[]byte("wow")}, + }, + dst: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham")}, + }, + want: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, + }, + }, + // Check that a scalar bytes field replaces rather than appends. + { + src: &pb.OtherMessage{Value: []byte("foo")}, + dst: &pb.OtherMessage{Value: []byte("bar")}, + want: &pb.OtherMessage{Value: []byte("foo")}, + }, +} + +func TestMerge(t *testing.T) { + for _, m := range mergeTests { + got := proto.Clone(m.dst) + proto.Merge(got, m.src) + if !proto.Equal(got, m.want) { + t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/decode.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/decode.go new file mode 100644 index 000000000..49a487a37 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/decode.go @@ -0,0 +1,721 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + // x, n already 0 + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + // x, err already 0 + + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
+func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. +func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d", st, tag) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? 
+ if prop.extendable { + if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + ext := e.ExtensionMap()[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + e.ExtensionMap()[int32(tag)] = ext + } + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + sp := new(string) + *sp = s + *structPointer_String(base, p.field) = sp + return nil +} + +// Decode a slice of bytes ([]byte). 
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + + y := *v + for i := 0; i < nb; i++ { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. +func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. 
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/encode.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/encode.go new file mode 100644 index 000000000..35e5ad56e --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/encode.go @@ -0,0 +1,1054 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // ErrRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + ErrRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
+func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + var state errorState + if err != nil && !state.shouldContinue(err, nil) { + return nil, err + } + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + if err != nil { + return err + } + p.buf = append(p.buf, data...) + return nil + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(t.Elem(), GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Encode++ + } + + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(t.Elem(), GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Size++ + } + + return +} + +// Individual type encoders. 
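// Illustrative aside (not part of the vendored goprotobuf sources): a minimal
// sketch of how the wire-format primitives defined in this file and in
// decode.go fit together -- a plain varint, a zigzag-encoded sint64, and a
// length-delimited string are written through a Buffer and then read back.
// The import path is assumed to be the upstream "code.google.com/p/goprotobuf/proto";
// code inside this tree would use the Godeps/_workspace vendored path instead,
// as clone_test.go does.
package main

import (
	"fmt"

	"code.google.com/p/goprotobuf/proto"
)

func main() {
	// Encode with the Buffer encoders shown above.
	b := proto.NewBuffer(nil)
	b.EncodeVarint(300)          // varint: 0xAC 0x02
	b.EncodeZigzag64(^uint64(0)) // sint64(-1) zigzag-encodes to varint 1
	b.EncodeStringBytes("hi")    // length-delimited: 0x02 'h' 'i'

	// Decode the same bytes back, in order, with the decoders from decode.go.
	// Errors are ignored here for brevity.
	d := proto.NewBuffer(b.Bytes())
	v, _ := d.DecodeVarint()
	z, _ := d.DecodeZigzag64()
	s, _ := d.DecodeStringBytes()
	fmt.Println(v, int64(z), s) // prints: 300 -1 hi
}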
+ +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil + } + + o.buf = append(o.buf, p.tagcode...) 
+ return o.enc_len_struct(p.stype, p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.stype, p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.stype, p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.stype, p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return ErrRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.stype, p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return ErrRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? 
+ if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.stype, p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return ErrRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.stype, p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return ErrRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.stype, p.sprop, b) + } + return +} + +// Encode an extension map. +func (o *Buffer) enc_map(p *Properties, base structPointer) error { + v := *structPointer_ExtMap(base, p.field) + if err := encodeExtensionMap(v); err != nil { + return err + } + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := *structPointer_ExtMap(base, p.field) + return sizeExtensionMap(v) +} + +// Encode a struct. +func (o *Buffer) enc_struct(t reflect.Type, prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // http://code.google.com/apis/protocolbuffers/docs/encoding.html#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if !state.shouldContinue(err, p) { + return err + } + } + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(t reflect.Type, prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). 
+func (o *Buffer) enc_len_struct(t reflect.Type, prop *StructProperties, base structPointer, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := o.enc_struct(t, prop, base) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal.go new file mode 100644 index 000000000..42542e692 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal.go @@ -0,0 +1,241 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. +// TODO: MessageSet. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal (a "bytes" field, + although represented by []byte, is not a repeated field) + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. (TODO) + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. 
+func equalAny(v1, v2 reflect.Value) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Ptr: + return equalAny(v1.Elem(), v2.Elem()) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i)) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// em1 and em2 are extension maps. +func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + } + + return true +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal_test.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal_test.go new file mode 100644 index 000000000..f588d0811 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal_test.go @@ -0,0 +1,166 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + pb "./testdata" + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +) + +// Four identical base messages. +// The init function adds extensions to some of them. +var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} + +// Two messages with non-message extensions. +var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} +var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)} + +func init() { + ext1 := &pb.Ext{Data: String("Kirk")} + ext2 := &pb.Ext{Data: String("Picard")} + + // messageWithExtension1a has ext1, but never marshals it. + if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1a failed: " + err.Error()) + } + + // messageWithExtension1b is the unmarshaled form of messageWithExtension1a. + if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1b failed: " + err.Error()) + } + buf, err := Marshal(messageWithExtension1b) + if err != nil { + panic("Marshal of 1b failed: " + err.Error()) + } + messageWithExtension1b.Reset() + if err := Unmarshal(buf, messageWithExtension1b); err != nil { + panic("Unmarshal of 1b failed: " + err.Error()) + } + + // messageWithExtension2 has ext2. 
+ if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil { + panic("SetExtension on 2 failed: " + err.Error()) + } + + if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil { + panic("SetExtension on Int32-1 failed: " + err.Error()) + } + if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil { + panic("SetExtension on Int32-2 failed: " + err.Error()) + } +} + +var EqualTests = []struct { + desc string + a, b Message + exp bool +}{ + {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false}, + {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true}, + {"nil vs nil", nil, nil, true}, + {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true}, + {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false}, + {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false}, + + {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false}, + {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false}, + {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false}, + {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true}, + + {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false}, + {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false}, + {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false}, + {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true}, + {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true}, + {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true}, + {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true}, + + { + "nested, different", + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}}, + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}}, + false, + }, + { + "nested, equal", + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, + true, + }, + + {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true}, + {"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true}, + {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false}, + { + "repeated bytes", + &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, + &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, + true, + }, + + {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false}, + {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true}, + {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false}, + + {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true}, + {"int32 extension vs. 
a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, + + { + "message with group", + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + true, + }, +} + +func TestEqual(t *testing.T) { + for _, tc := range EqualTests { + if res := Equal(tc.a, tc.b); res != tc.exp { + t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions.go new file mode 100644 index 000000000..e592053c5 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions.go @@ -0,0 +1,351 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. 
+type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base extendableProto, id int32, b []byte) { + base.ExtensionMap()[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + // Check the extended type. + if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. +func encodeExtensionMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. 
+ x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func sizeExtensionMap(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + _, ok := pb.ExtensionMap()[extension.Field] + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb extendableProto, extension *ExtensionDesc) { + // TODO: Check types, field numbers, etc.? + delete(pb.ExtensionMap(), extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present it returns ErrMissingExtension. +func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { + if err := checkExtensionTypes(pb, extension); err != nil { + return nil, err + } + + e, ok := pb.ExtensionMap()[extension.Field] + if !ok { + return nil, ErrMissingExtension + } + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + return e.value, nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + rep := extension.repeated() + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if !rep || o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. 
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := pb.(extendableProto) + if !ok { + err = errors.New("proto: not an extendable proto") + return + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { + if err := checkExtensionTypes(pb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + + pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions_test.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions_test.go new file mode 100644 index 000000000..96cae2e4c --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions_test.go @@ -0,0 +1,60 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + pb "./testdata" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +) + +func TestGetExtensionsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", ext1) + } + exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ + pb.E_Ext_More, + pb.E_Ext_Text, + }) + if err != nil { + t.Fatalf("GetExtensions() failed: %s", err) + } + if exts[0] != ext1 { + t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) + } + if exts[1] != nil { + t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/lib.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/lib.go new file mode 100644 index 000000000..46a4416fd --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/lib.go @@ -0,0 +1,740 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* + Package proto converts data structures to and from the wire format of + protocol buffers. It works in concert with the Go source code generated + for .proto files by the protocol compiler. 
+ + A summary of the properties of the protocol buffer interface + for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + Helpers for getting values are superseded by the + GetFoo methods and their use is deprecated. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed with the enum's type name. Enum types have + a String method, and a Enum method to assist in message construction. + - Nested groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Marshal and Unmarshal are functions to encode and decode the wire format. + + The simplest way to describe this is to see an example. 
+ Given file test.proto, containing + + package example; + + enum FOO { X = 17; }; + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + } + + The resulting file, test.pb.go, is: + + package example + + import "code.google.com/p/goprotobuf/proto" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` + } + func (this *Test) Reset() { *this = Test{} } + func (this *Test) String() string { return proto.CompactTextString(this) } + const Default_Test_Type int32 = 77 + + func (this *Test) GetLabel() string { + if this != nil && this.Label != nil { + return *this.Label + } + return "" + } + + func (this *Test) GetType() int32 { + if this != nil && this.Type != nil { + return *this.Type + } + return Default_Test_Type + } + + func (this *Test) GetOptionalgroup() *Test_OptionalGroup { + if this != nil { + return this.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` + } + func (this *Test_OptionalGroup) Reset() { *this = Test_OptionalGroup{} } + func (this *Test_OptionalGroup) String() string { return proto.CompactTextString(this) } + + func (this *Test_OptionalGroup) GetRequiredField() string { + if this != nil && this.RequiredField != nil { + return *this.RequiredField + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + + To create and play with a Test object: + + package main + + import ( + "log" + + "code.google.com/p/goprotobuf/proto" + "./example.pb" + ) + + func main() { + test := &example.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Optionalgroup: &example.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := new(example.Test) + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. 
+type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // write point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + p := new(uint32) + *p = v + return p +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. 
+func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (o *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := o.buf + index := o.index + o.buf = b + o.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := o.index + if index == len(o.buf) { + break + } + + op, err := o.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = o.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = o.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = o.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + break + + case WireVarint: + u, err = o.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + if err != nil { + fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + if err != nil { + fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", o.index, depth) + } + fmt.Printf("\n") + + o.buf = obuf + o.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. 
+// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. + switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + if f.IsNil() { + continue + } + // f is *T or []*T + if f.Kind() == reflect.Ptr { + setDefaults(f, recur, zeros) + } else { + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +func ptrToStruct(t reflect.Type) bool { + return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct +} + +// t is a struct type. 
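To make the behaviour of SetDefaults concrete, here is a self-contained sketch; Greeting is a hand-written stand-in for a generated message whose .proto would declare optional string text = 1 [default = "hello"]:

    package main

    import (
        "fmt"

        "code.google.com/p/goprotobuf/proto"
    )

    // Greeting mimics what protoc-gen-go would produce for a field with a default.
    type Greeting struct {
        Text             *string `protobuf:"bytes,1,opt,name=text,def=hello"`
        XXX_unrecognized []byte
    }

    func (m *Greeting) Reset()         { *m = Greeting{} }
    func (m *Greeting) String() string { return proto.CompactTextString(m) }
    func (*Greeting) ProtoMessage()    {}

    func main() {
        g := new(Greeting)   // Text is unset (nil)
        proto.SetDefaults(g) // fills unset fields that declare defaults
        fmt.Println(*g.Text) // hello
    }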
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + // nested messages + if ptrToStruct(ft) || (ft.Kind() == reflect.Slice && ptrToStruct(ft.Elem())) { + dm.nested = append(dm.nested, fi) + continue + } + + sf := scalarField{ + index: fi, + kind: ft.Elem().Kind(), + } + + // scalar fields without defaults + if !prop.HasDefault { + dm.scalars = append(dm.scalars, sf) + continue + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + log.Printf("proto: bad default bool %q: %v", prop.Default, err) + continue + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + log.Printf("proto: bad default float32 %q: %v", prop.Default, err) + continue + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + log.Printf("proto: bad default float64 %q: %v", prop.Default, err) + continue + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + log.Printf("proto: bad default int32 %q: %v", prop.Default, err) + continue + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + log.Printf("proto: bad default int64 %q: %v", prop.Default, err) + continue + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + log.Printf("proto: bad default uint32 %q: %v", prop.Default, err) + continue + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + log.Printf("proto: bad default uint64 %q: %v", prop.Default, err) + continue + } + sf.value = x + default: + log.Printf("proto: unhandled def kind %v", ft.Elem().Kind()) + continue + } + + dm.scalars = append(dm.scalars, sf) + } + + return dm +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set.go new file mode 100644 index 000000000..1a1780983 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set.go @@ -0,0 +1,229 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "errors" + "reflect" + "sort" +) + +// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and MessageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. +// +// When a proto1 proto has a field that looks like: +// optional message info = 3; +// the protocol compiler produces a field in the generated struct that looks like: +// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` +// The package is automatically inserted so there is no need for that proto file to +// import this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type MessageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure MessageSet is a Message. +var _ Message = (*MessageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *MessageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *MessageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *MessageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return ErrNoMessageTypeId + } + return nil // TODO: return error instead? 
+} + +func (ms *MessageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return ErrNoMessageTypeId + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *MessageSet) Reset() { *ms = MessageSet{} } +func (ms *MessageSet) String() string { return CompactTextString(ms) } +func (*MessageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { + if err := encodeExtensionMap(m); err != nil { + return nil, err + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { + ms := new(MessageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. 
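A sketch of round-tripping a message through a MessageSet; Note is an invented stand-in for a generated message that supplies MessageTypeId (the type ID 12345 is arbitrary):

    package main

    import (
        "fmt"
        "log"

        "code.google.com/p/goprotobuf/proto"
    )

    // Note implements Message plus MessageTypeId, the extra method MessageSet needs.
    type Note struct {
        Text             *string `protobuf:"bytes,15,opt,name=text"`
        XXX_unrecognized []byte
    }

    func (m *Note) Reset()             { *m = Note{} }
    func (m *Note) String() string     { return proto.CompactTextString(m) }
    func (*Note) ProtoMessage()        {}
    func (*Note) MessageTypeId() int32 { return 12345 }

    func main() {
        var ms proto.MessageSet
        if err := ms.Marshal(&Note{Text: proto.String("hi")}); err != nil {
            log.Fatal(err)
        }

        out := new(Note)
        if err := ms.Unmarshal(out); err != nil {
            log.Fatal(err)
        }
        fmt.Println(ms.Has(out), *out.Text) // true hi
    }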
+func RegisterMessageSetType(i messageTypeIder, name string) { + messageSetMap[i.MessageTypeId()] = messageSetDesc{ + t: reflect.TypeOf(i), + name: name, + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set_test.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set_test.go new file mode 100644 index 000000000..bb311bccc --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set_test.go @@ -0,0 +1,66 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "testing" +) + +func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { + // Check that a repeated message set entry will be concatenated. + in := &MessageSet{ + Item: []*_MessageSet_Item{ + {TypeId: Int32(12345), Message: []byte("hoo")}, + {TypeId: Int32(12345), Message: []byte("hah")}, + }, + } + b, err := Marshal(in) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("Marshaled bytes: %q", b) + + m := make(map[int32]Extension) + if err := UnmarshalMessageSet(b, m); err != nil { + t.Fatalf("UnmarshalMessageSet: %v", err) + } + ext, ok := m[12345] + if !ok { + t.Fatalf("Didn't retrieve extension 12345; map is %v", m) + } + // Skip wire type/field number and length varints. + got := skipVarint(skipVarint(ext.enc)) + if want := []byte("hoohah"); !bytes.Equal(got, want) { + t.Errorf("Combined extension is %q, want %q", got, want) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..61141ba85 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_reflect.go @@ -0,0 +1,384 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine,!appenginevm + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. 
+ if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. 
+func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. +type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. 
+type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..27a536c88 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_unsafe.go @@ -0,0 +1,218 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine appenginevm + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. 
+func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. 
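The accessors in this file are unexported plumbing, but the underlying technique is just base-pointer-plus-field-offset arithmetic. A standalone sketch of the same idea in ordinary Go (not this package's API):

    package main

    import (
        "fmt"
        "reflect"
        "unsafe"
    )

    type pair struct {
        A *uint32
        B []byte
    }

    func main() {
        x := uint32(7)
        p := &pair{A: &x, B: []byte("hi")}

        base := unsafe.Pointer(p)
        off := reflect.TypeOf(pair{}).Field(1).Offset // byte offset of B, as toField records

        // Same shape as structPointer_Bytes: offset the base pointer, then cast.
        bp := (*[]byte)(unsafe.Pointer(uintptr(base) + off))
        fmt.Println(string(*bp)) // hi
    }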
+type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/properties.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/properties.go new file mode 100644 index 000000000..f2c1e861c --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/properties.go @@ -0,0 +1,658 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. 
+type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + + Default string // default value + HasDefault bool // whether an explicit default was provided + def_uint64 uint64 + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. 
+func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + if p.OrigName != p.Name { + s += ",name=" + p.OrigName + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
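To make the tag grammar handled by Parse concrete, here is a runnable sketch that feeds it the example tag from its own comment and prints the decoded properties (import path assumed as above):

    package main

    import (
        "fmt"

        "code.google.com/p/goprotobuf/proto"
    )

    func main() {
        var p proto.Properties
        p.Parse("bytes,49,opt,name=foo,def=hello!")

        fmt.Println(p.Wire, p.Tag, p.Optional, p.OrigName, p.HasDefault, p.Default)
        // bytes 49 true foo true hello!
    }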
+func (p *Properties) setEncAndDec(typ reflect.Type, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %T\n", t1) + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %T -> %T\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_byte + p.dec = (*Buffer).dec_slice_byte + p.size = size_slice_byte + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = 
(*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + } + } + + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. +func isMarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isMarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isMarshaler") + } + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isUnmarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isUnmarshaler") + } + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, lockGetProp) +} + +var ( + mutex sync.Mutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +func GetProperties(t reflect.Type) *StructProperties { + mutex.Lock() + sprop := getPropertiesLocked(t) + mutex.Unlock() + return sprop +} + +// getPropertiesLocked requires that mutex is held. 
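The tagcode precomputed above is simply the varint encoding of the field key (Tag<<3 | wireType). A small standalone sketch of the same computation:

    package main

    import "fmt"

    // encodeKey mirrors the tagcode loop in setEncAndDec: varint-encode (tag<<3 | wire).
    func encodeKey(tag, wire uint32) []byte {
        x := tag<<3 | wire
        var buf []byte
        for x > 127 {
            buf = append(buf, 0x80|uint8(x&0x7F))
            x >>= 7
        }
        return append(buf, uint8(x))
    }

    func main() {
        fmt.Printf("% x\n", encodeKey(1, 0))  // 08     (tag 1, varint)
        fmt.Printf("% x\n", encodeKey(16, 2)) // 82 01  (tag 16, bytes)
    }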
+func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_extensions" { // special case + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size2_test.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size2_test.go new file mode 100644 index 000000000..55902a4a9 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size2_test.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "testing" +) + +// This is a separate file and package from size_test.go because that one uses +// generated messages and thus may not be in package proto without having a circular +// dependency, whereas this file tests unexported details of size.go. + +func TestVarintSize(t *testing.T) { + // Check the edge cases carefully. + testCases := []struct { + n uint64 + size int + }{ + {0, 1}, + {1, 1}, + {127, 1}, + {128, 2}, + {16383, 2}, + {16384, 3}, + {1<<63 - 1, 9}, + {1 << 63, 10}, + } + for _, tc := range testCases { + size := sizeVarint(tc.n) + if size != tc.size { + t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size_test.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size_test.go new file mode 100644 index 000000000..e8de6684a --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size_test.go @@ -0,0 +1,120 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "log" + "testing" + + pb "./testdata" + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +) + +var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} + +// messageWithExtension2 is in equal_test.go. +var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} + +func init() { + if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + + // Force messageWithExtension3 to have the extension encoded. + Marshal(messageWithExtension3) + +} + +var SizeTests = []struct { + desc string + pb Message +}{ + {"empty", &pb.OtherMessage{}}, + // Basic types. + {"bool", &pb.Defaults{F_Bool: Bool(true)}}, + {"int32", &pb.Defaults{F_Int32: Int32(12)}}, + {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, + {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, + {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, + {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, + {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, + {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, + {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, + {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, + {"float", &pb.Defaults{F_Float: Float32(12.6)}}, + {"double", &pb.Defaults{F_Double: Float64(13.9)}}, + {"string", &pb.Defaults{F_String: String("niles")}}, + {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, + {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, + {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, + {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, + {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, + // Repeated. + {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, + {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, + {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, + {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, + {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, + {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ + // Need enough large numbers to verify that the header is counting the number of bytes + // for the field, not the number of elements. 
+ 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + }}}, + {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, + {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, + // Nested. + {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, + {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, + // Other things. + {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, + {"extension (unencoded)", messageWithExtension1}, + {"extension (encoded)", messageWithExtension3}, +} + +func TestSize(t *testing.T) { + for _, tc := range SizeTests { + size := Size(tc.pb) + b, err := Marshal(tc.pb) + if err != nil { + t.Errorf("%v: Marshal failed: %v", tc.desc, err) + continue + } + if size != len(b) { + t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) + t.Logf("%v: bytes: %#v", tc.desc, b) + } + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/Makefile b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/Makefile new file mode 100644 index 000000000..9fa10e4c6 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/Makefile @@ -0,0 +1,50 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# http://code.google.com/p/goprotobuf/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +include ../../Make.protobuf + +all: regenerate + +regenerate: + rm -f test.pb.go + make test.pb.go + +# The following rules are just aids to development. Not needed for typical testing. 
+ +diff: regenerate + hg diff test.pb.go + +restore: + cp test.pb.go.golden test.pb.go + +preserve: + cp test.pb.go test.pb.go.golden diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/golden_test.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/golden_test.go new file mode 100644 index 000000000..f614aa1b7 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/golden_test.go @@ -0,0 +1,86 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Verify that the compiler output for test.proto is unchanged. + +package testdata + +import ( + "crypto/sha1" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. +func sum(t *testing.T, name string) string { + data, err := ioutil.ReadFile(name) + if err != nil { + t.Fatal(err) + } + t.Logf("sum(%q): length is %d", name, len(data)) + hash := sha1.New() + _, err = hash.Write(data) + if err != nil { + t.Fatal(err) + } + return fmt.Sprintf("% x", hash.Sum(nil)) +} + +func run(t *testing.T, name string, args ...string) { + cmd := exec.Command(name, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + if err != nil { + t.Fatal(err) + } +} + +func TestGolden(t *testing.T) { + // Compute the original checksum. + goldenSum := sum(t, "test.pb.go") + // Run the proto compiler. + run(t, "protoc", "--go_out="+os.TempDir(), "test.proto") + newFile := filepath.Join(os.TempDir(), "test.pb.go") + defer os.Remove(newFile) + // Compute the new checksum. 
+ newSum := sum(t, newFile) + // Verify + if newSum != goldenSum { + run(t, "diff", "-u", "test.pb.go", newFile) + t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.pb.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.pb.go new file mode 100644 index 000000000..5ffe48382 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.pb.go @@ -0,0 +1,2350 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +/* +Package testdata is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + GoEnum + GoTestField + GoTest + GoSkipTest + NonPackedTest + PackedTest + MaxTag + OldMessage + NewMessage + InnerMessage + OtherMessage + MyMessage + Ext + MyMessageSet + Empty + MessageList + Strings + Defaults + SubDefaults + RepeatedEnum + MoreRepeated + GroupOld + GroupNew + FloatingPoint +*/ +package testdata + +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type FOO int32 + +const ( + FOO_FOO1 FOO = 1 +) + +var FOO_name = map[int32]string{ + 1: "FOO1", +} +var FOO_value = map[string]int32{ + "FOO1": 1, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} + +// An enum, for completeness. 
+type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + // Basic types + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + // Groupings + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + // Table types + GoTest_TABLE GoTest_KIND = 11 + // Functions + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return err + } + *x = GoTest_KIND(value) + return nil +} + +type MyMessage_Color int32 + +const ( + MyMessage_RED MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} + +type Defaults_Color int32 + +const ( + Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) +} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = 
RepeatedEnum_Color(value) + return nil +} + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return FOO_FOO1 +} + +type GoTestField struct { + Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + // Some typical parameters + Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"` + // Required, repeated and optional foreign fields. + RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"` + // Required fields of all basic types + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` + // Repeated fields of all basic types + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 
`protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` + // Optional fields of all basic types + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` + // Default-valued fields of all basic types + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 
`protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + // Packed repeated fields (no string or bytes). + F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} + +const Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const 
Default_GoTest_F_Sint64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return GoTest_VOID +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func (m *GoTest) GetF_BoolRequired() bool { + if m != nil && m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func (m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return m.F_BytesRequired + } + return nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + 
return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) GetF_Int32Optional() int32 { + if m != nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m *GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return *m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil { + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 { + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted +} + +func (m *GoTest) GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted + } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return *m.F_Fixed64Defaulted + } + return Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted 
+} + +func (m *GoTest) GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) +} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +// Required, repeated, and optional groups. 
+type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} + +func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. +type GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest_SkipGroup) ProtoMessage() {} + +func (m 
*GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m *GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +type NonPackedTest struct { + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + // Maximum possible tag number. + LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *OldMessage) GetNum() int32 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} + +func (m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + // This is an int32 in OldMessage. 
+ Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *NewMessage) GetNum() int64 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_InnerMessage_Port +} + +func (m *InnerMessage) GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return *m.Weight + } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" 
json:"others,omitempty"` + RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` + // This field becomes [][]byte in the generated code. + RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} + +var extRange_MyMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} +func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetRepInner() []*InnerMessage { + if m != nil { + return m.RepInner + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return MyMessage_RED +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return *m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "testdata.Ext.more", + Tag: "bytes,103,opt,name=more", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + 
ExtendedType: (*MyMessage)(nil), + ExtensionType: (*string)(nil), + Field: 104, + Name: "testdata.Ext.text", + Tag: "bytes,104,opt,name=text", +} + +var E_Ext_Number = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "testdata.Ext.number", + Tag: "varint,105,opt,name=number", +} + +type MyMessageSet struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } +func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } +func (*MyMessageSet) ProtoMessage() {} + +func (m *MyMessageSet) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(m.ExtensionMap()) +} +func (m *MyMessageSet) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) +} + +// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*MyMessageSet)(nil) +var _ proto.Unmarshaler = (*MyMessageSet)(nil) + +var extRange_MyMessageSet = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessageSet +} +func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() string { return proto.CompactTextString(m) } +func (*MessageList) ProtoMessage() {} + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. 
+ F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + // More fields with crazy defaults. + F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"` + // Sub-message. + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + // Redundant but explicit defaults. + StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} + +const Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return *m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + 
return Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return *m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) +} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +func (m *Defaults) GetStrZero() string { + if m != nil && m.StrZero != nil { + return *m.StrZero + } + return "" +} + +type SubDefaults struct { + N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubDefaults) Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} + +func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m 
*MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() []bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) GetIntsPacked() []int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetInt64SPacked() []int64 { + if m != nil { + return m.Int64SPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { + return m.Strings + } + return nil +} + +func (m *MoreRepeated) GetFixeds() []uint32 { + if m != nil { + return m.Fixeds + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,101,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,101,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type FloatingPoint struct { + F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } +func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } +func (*FloatingPoint) ProtoMessage() {} + +func (m *FloatingPoint) GetF() float64 { + if m != nil && m.F != nil { + return *m.F + } + return 0 +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "testdata.greeting", + Tag: "bytes,106,rep,name=greeting", +} + +var E_X201 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 201, + Name: "testdata.x201", + Tag: "bytes,201,opt,name=x201", +} + +var E_X202 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 202, + Name: "testdata.x202", + Tag: 
"bytes,202,opt,name=x202", +} + +var E_X203 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 203, + Name: "testdata.x203", + Tag: "bytes,203,opt,name=x203", +} + +var E_X204 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 204, + Name: "testdata.x204", + Tag: "bytes,204,opt,name=x204", +} + +var E_X205 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 205, + Name: "testdata.x205", + Tag: "bytes,205,opt,name=x205", +} + +var E_X206 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 206, + Name: "testdata.x206", + Tag: "bytes,206,opt,name=x206", +} + +var E_X207 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 207, + Name: "testdata.x207", + Tag: "bytes,207,opt,name=x207", +} + +var E_X208 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 208, + Name: "testdata.x208", + Tag: "bytes,208,opt,name=x208", +} + +var E_X209 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 209, + Name: "testdata.x209", + Tag: "bytes,209,opt,name=x209", +} + +var E_X210 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 210, + Name: "testdata.x210", + Tag: "bytes,210,opt,name=x210", +} + +var E_X211 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 211, + Name: "testdata.x211", + Tag: "bytes,211,opt,name=x211", +} + +var E_X212 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 212, + Name: "testdata.x212", + Tag: "bytes,212,opt,name=x212", +} + +var E_X213 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 213, + Name: "testdata.x213", + Tag: "bytes,213,opt,name=x213", +} + +var E_X214 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 214, + Name: "testdata.x214", + Tag: "bytes,214,opt,name=x214", +} + +var E_X215 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 215, + Name: "testdata.x215", + Tag: "bytes,215,opt,name=x215", +} + +var E_X216 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 216, + Name: "testdata.x216", + Tag: "bytes,216,opt,name=x216", +} + +var E_X217 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 217, + Name: "testdata.x217", + Tag: "bytes,217,opt,name=x217", +} + +var E_X218 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 218, + Name: "testdata.x218", + Tag: "bytes,218,opt,name=x218", +} + +var E_X219 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 219, + Name: "testdata.x219", + Tag: "bytes,219,opt,name=x219", +} + +var E_X220 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 220, + Name: "testdata.x220", + Tag: "bytes,220,opt,name=x220", +} + +var E_X221 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 221, + Name: "testdata.x221", + Tag: "bytes,221,opt,name=x221", +} + +var E_X222 = 
&proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 222, + Name: "testdata.x222", + Tag: "bytes,222,opt,name=x222", +} + +var E_X223 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 223, + Name: "testdata.x223", + Tag: "bytes,223,opt,name=x223", +} + +var E_X224 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 224, + Name: "testdata.x224", + Tag: "bytes,224,opt,name=x224", +} + +var E_X225 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 225, + Name: "testdata.x225", + Tag: "bytes,225,opt,name=x225", +} + +var E_X226 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 226, + Name: "testdata.x226", + Tag: "bytes,226,opt,name=x226", +} + +var E_X227 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 227, + Name: "testdata.x227", + Tag: "bytes,227,opt,name=x227", +} + +var E_X228 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 228, + Name: "testdata.x228", + Tag: "bytes,228,opt,name=x228", +} + +var E_X229 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 229, + Name: "testdata.x229", + Tag: "bytes,229,opt,name=x229", +} + +var E_X230 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 230, + Name: "testdata.x230", + Tag: "bytes,230,opt,name=x230", +} + +var E_X231 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 231, + Name: "testdata.x231", + Tag: "bytes,231,opt,name=x231", +} + +var E_X232 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 232, + Name: "testdata.x232", + Tag: "bytes,232,opt,name=x232", +} + +var E_X233 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 233, + Name: "testdata.x233", + Tag: "bytes,233,opt,name=x233", +} + +var E_X234 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 234, + Name: "testdata.x234", + Tag: "bytes,234,opt,name=x234", +} + +var E_X235 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 235, + Name: "testdata.x235", + Tag: "bytes,235,opt,name=x235", +} + +var E_X236 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 236, + Name: "testdata.x236", + Tag: "bytes,236,opt,name=x236", +} + +var E_X237 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 237, + Name: "testdata.x237", + Tag: "bytes,237,opt,name=x237", +} + +var E_X238 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 238, + Name: "testdata.x238", + Tag: "bytes,238,opt,name=x238", +} + +var E_X239 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 239, + Name: "testdata.x239", + Tag: "bytes,239,opt,name=x239", +} + +var E_X240 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 240, + Name: "testdata.x240", + Tag: "bytes,240,opt,name=x240", +} + +var E_X241 = &proto.ExtensionDesc{ + ExtendedType: 
(*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 241, + Name: "testdata.x241", + Tag: "bytes,241,opt,name=x241", +} + +var E_X242 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 242, + Name: "testdata.x242", + Tag: "bytes,242,opt,name=x242", +} + +var E_X243 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 243, + Name: "testdata.x243", + Tag: "bytes,243,opt,name=x243", +} + +var E_X244 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 244, + Name: "testdata.x244", + Tag: "bytes,244,opt,name=x244", +} + +var E_X245 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 245, + Name: "testdata.x245", + Tag: "bytes,245,opt,name=x245", +} + +var E_X246 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 246, + Name: "testdata.x246", + Tag: "bytes,246,opt,name=x246", +} + +var E_X247 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 247, + Name: "testdata.x247", + Tag: "bytes,247,opt,name=x247", +} + +var E_X248 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 248, + Name: "testdata.x248", + Tag: "bytes,248,opt,name=x248", +} + +var E_X249 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 249, + Name: "testdata.x249", + Tag: "bytes,249,opt,name=x249", +} + +var E_X250 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 250, + Name: "testdata.x250", + Tag: "bytes,250,opt,name=x250", +} + +func init() { + proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) + proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) + proto.RegisterExtension(E_X201) + proto.RegisterExtension(E_X202) + proto.RegisterExtension(E_X203) + proto.RegisterExtension(E_X204) + proto.RegisterExtension(E_X205) + proto.RegisterExtension(E_X206) + proto.RegisterExtension(E_X207) + proto.RegisterExtension(E_X208) + proto.RegisterExtension(E_X209) + proto.RegisterExtension(E_X210) + proto.RegisterExtension(E_X211) + proto.RegisterExtension(E_X212) + proto.RegisterExtension(E_X213) + proto.RegisterExtension(E_X214) + proto.RegisterExtension(E_X215) + proto.RegisterExtension(E_X216) + proto.RegisterExtension(E_X217) + proto.RegisterExtension(E_X218) + proto.RegisterExtension(E_X219) + proto.RegisterExtension(E_X220) + proto.RegisterExtension(E_X221) + proto.RegisterExtension(E_X222) + proto.RegisterExtension(E_X223) + proto.RegisterExtension(E_X224) + proto.RegisterExtension(E_X225) + proto.RegisterExtension(E_X226) + proto.RegisterExtension(E_X227) + proto.RegisterExtension(E_X228) + proto.RegisterExtension(E_X229) + proto.RegisterExtension(E_X230) + proto.RegisterExtension(E_X231) + proto.RegisterExtension(E_X232) + proto.RegisterExtension(E_X233) + proto.RegisterExtension(E_X234) + 
proto.RegisterExtension(E_X235) + proto.RegisterExtension(E_X236) + proto.RegisterExtension(E_X237) + proto.RegisterExtension(E_X238) + proto.RegisterExtension(E_X239) + proto.RegisterExtension(E_X240) + proto.RegisterExtension(E_X241) + proto.RegisterExtension(E_X242) + proto.RegisterExtension(E_X243) + proto.RegisterExtension(E_X244) + proto.RegisterExtension(E_X245) + proto.RegisterExtension(E_X246) + proto.RegisterExtension(E_X247) + proto.RegisterExtension(E_X248) + proto.RegisterExtension(E_X249) + proto.RegisterExtension(E_X250) +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.proto b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.proto new file mode 100644 index 000000000..9c0c4c897 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.proto @@ -0,0 +1,428 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A feature-rich test file for the protocol compiler and libraries. + +syntax = "proto2"; + +package testdata; + +enum FOO { FOO1 = 1; }; + +message GoEnum { + required FOO foo = 1; +} + +message GoTestField { + required string Label = 1; + required string Type = 2; +} + +message GoTest { + // An enum, for completeness. + enum KIND { + VOID = 0; + + // Basic types + BOOL = 1; + BYTES = 2; + FINGERPRINT = 3; + FLOAT = 4; + INT = 5; + STRING = 6; + TIME = 7; + + // Groupings + TUPLE = 8; + ARRAY = 9; + MAP = 10; + + // Table types + TABLE = 11; + + // Functions + FUNCTION = 12; // last tag + }; + + // Some typical parameters + required KIND Kind = 1; + optional string Table = 2; + optional int32 Param = 3; + + // Required, repeated and optional foreign fields. 
+ required GoTestField RequiredField = 4; + repeated GoTestField RepeatedField = 5; + optional GoTestField OptionalField = 6; + + // Required fields of all basic types + required bool F_Bool_required = 10; + required int32 F_Int32_required = 11; + required int64 F_Int64_required = 12; + required fixed32 F_Fixed32_required = 13; + required fixed64 F_Fixed64_required = 14; + required uint32 F_Uint32_required = 15; + required uint64 F_Uint64_required = 16; + required float F_Float_required = 17; + required double F_Double_required = 18; + required string F_String_required = 19; + required bytes F_Bytes_required = 101; + required sint32 F_Sint32_required = 102; + required sint64 F_Sint64_required = 103; + + // Repeated fields of all basic types + repeated bool F_Bool_repeated = 20; + repeated int32 F_Int32_repeated = 21; + repeated int64 F_Int64_repeated = 22; + repeated fixed32 F_Fixed32_repeated = 23; + repeated fixed64 F_Fixed64_repeated = 24; + repeated uint32 F_Uint32_repeated = 25; + repeated uint64 F_Uint64_repeated = 26; + repeated float F_Float_repeated = 27; + repeated double F_Double_repeated = 28; + repeated string F_String_repeated = 29; + repeated bytes F_Bytes_repeated = 201; + repeated sint32 F_Sint32_repeated = 202; + repeated sint64 F_Sint64_repeated = 203; + + // Optional fields of all basic types + optional bool F_Bool_optional = 30; + optional int32 F_Int32_optional = 31; + optional int64 F_Int64_optional = 32; + optional fixed32 F_Fixed32_optional = 33; + optional fixed64 F_Fixed64_optional = 34; + optional uint32 F_Uint32_optional = 35; + optional uint64 F_Uint64_optional = 36; + optional float F_Float_optional = 37; + optional double F_Double_optional = 38; + optional string F_String_optional = 39; + optional bytes F_Bytes_optional = 301; + optional sint32 F_Sint32_optional = 302; + optional sint64 F_Sint64_optional = 303; + + // Default-valued fields of all basic types + optional bool F_Bool_defaulted = 40 [default=true]; + optional int32 F_Int32_defaulted = 41 [default=32]; + optional int64 F_Int64_defaulted = 42 [default=64]; + optional fixed32 F_Fixed32_defaulted = 43 [default=320]; + optional fixed64 F_Fixed64_defaulted = 44 [default=640]; + optional uint32 F_Uint32_defaulted = 45 [default=3200]; + optional uint64 F_Uint64_defaulted = 46 [default=6400]; + optional float F_Float_defaulted = 47 [default=314159.]; + optional double F_Double_defaulted = 48 [default=271828.]; + optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; + optional sint32 F_Sint32_defaulted = 402 [default = -32]; + optional sint64 F_Sint64_defaulted = 403 [default = -64]; + + // Packed repeated fields (no string or bytes). + repeated bool F_Bool_repeated_packed = 50 [packed=true]; + repeated int32 F_Int32_repeated_packed = 51 [packed=true]; + repeated int64 F_Int64_repeated_packed = 52 [packed=true]; + repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; + repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; + repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; + repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; + repeated float F_Float_repeated_packed = 57 [packed=true]; + repeated double F_Double_repeated_packed = 58 [packed=true]; + repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; + repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; + + // Required, repeated, and optional groups. 
+ required group RequiredGroup = 70 { + required string RequiredField = 71; + }; + + repeated group RepeatedGroup = 80 { + required string RequiredField = 81; + }; + + optional group OptionalGroup = 90 { + required string RequiredField = 91; + }; +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. +message GoSkipTest { + required int32 skip_int32 = 11; + required fixed32 skip_fixed32 = 12; + required fixed64 skip_fixed64 = 13; + required string skip_string = 14; + required group SkipGroup = 15 { + required int32 group_int32 = 16; + required string group_string = 17; + } +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +message NonPackedTest { + repeated int32 a = 1; +} + +message PackedTest { + repeated int32 b = 1 [packed=true]; +} + +message MaxTag { + // Maximum possible tag number. + optional string last_field = 536870911; +} + +message OldMessage { + message Nested { + optional string name = 1; + } + optional Nested nested = 1; + + optional int32 num = 2; +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +message NewMessage { + message Nested { + optional string name = 1; + optional string food_group = 2; + } + optional Nested nested = 1; + + // This is an int32 in OldMessage. + optional int64 num = 2; +} + +// Smaller tests for ASCII formatting. + +message InnerMessage { + required string host = 1; + optional int32 port = 2 [default=4000]; + optional bool connected = 3; +} + +message OtherMessage { + optional int64 key = 1; + optional bytes value = 2; + optional float weight = 3; + optional InnerMessage inner = 4; +} + +message MyMessage { + required int32 count = 1; + optional string name = 2; + optional string quote = 3; + repeated string pet = 4; + optional InnerMessage inner = 5; + repeated OtherMessage others = 6; + repeated InnerMessage rep_inner = 12; + + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color bikeshed = 7; + + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // This field becomes [][]byte in the generated code. 
+ repeated bytes rep_bytes = 10; + + optional double bigfloat = 11; + + extensions 100 to max; +} + +message Ext { + extend MyMessage { + optional Ext more = 103; + optional string text = 104; + optional int32 number = 105; + } + + optional string data = 1; +} + +extend MyMessage { + repeated string greeting = 106; +} + +message MyMessageSet { + option message_set_wire_format = true; + extensions 100 to max; +} + +message Empty { +} + +extend MyMessageSet { + optional Empty x201 = 201; + optional Empty x202 = 202; + optional Empty x203 = 203; + optional Empty x204 = 204; + optional Empty x205 = 205; + optional Empty x206 = 206; + optional Empty x207 = 207; + optional Empty x208 = 208; + optional Empty x209 = 209; + optional Empty x210 = 210; + optional Empty x211 = 211; + optional Empty x212 = 212; + optional Empty x213 = 213; + optional Empty x214 = 214; + optional Empty x215 = 215; + optional Empty x216 = 216; + optional Empty x217 = 217; + optional Empty x218 = 218; + optional Empty x219 = 219; + optional Empty x220 = 220; + optional Empty x221 = 221; + optional Empty x222 = 222; + optional Empty x223 = 223; + optional Empty x224 = 224; + optional Empty x225 = 225; + optional Empty x226 = 226; + optional Empty x227 = 227; + optional Empty x228 = 228; + optional Empty x229 = 229; + optional Empty x230 = 230; + optional Empty x231 = 231; + optional Empty x232 = 232; + optional Empty x233 = 233; + optional Empty x234 = 234; + optional Empty x235 = 235; + optional Empty x236 = 236; + optional Empty x237 = 237; + optional Empty x238 = 238; + optional Empty x239 = 239; + optional Empty x240 = 240; + optional Empty x241 = 241; + optional Empty x242 = 242; + optional Empty x243 = 243; + optional Empty x244 = 244; + optional Empty x245 = 245; + optional Empty x246 = 246; + optional Empty x247 = 247; + optional Empty x248 = 248; + optional Empty x249 = 249; + optional Empty x250 = 250; +} + +message MessageList { + repeated group Message = 1 { + required string name = 2; + required int32 count = 3; + } +} + +message Strings { + optional string string_field = 1; + optional bytes bytes_field = 2; +} + +message Defaults { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + optional bool F_Bool = 1 [default=true]; + optional int32 F_Int32 = 2 [default=32]; + optional int64 F_Int64 = 3 [default=64]; + optional fixed32 F_Fixed32 = 4 [default=320]; + optional fixed64 F_Fixed64 = 5 [default=640]; + optional uint32 F_Uint32 = 6 [default=3200]; + optional uint64 F_Uint64 = 7 [default=6400]; + optional float F_Float = 8 [default=314159.]; + optional double F_Double = 9 [default=271828.]; + optional string F_String = 10 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes = 11 [default="Bignose"]; + optional sint32 F_Sint32 = 12 [default=-32]; + optional sint64 F_Sint64 = 13 [default=-64]; + optional Color F_Enum = 14 [default=GREEN]; + + // More fields with crazy defaults. + optional float F_Pinf = 15 [default=inf]; + optional float F_Ninf = 16 [default=-inf]; + optional float F_Nan = 17 [default=nan]; + + // Sub-message. + optional SubDefaults sub = 18; + + // Redundant but explicit defaults. 
+ optional string str_zero = 19 [default=""]; +} + +message SubDefaults { + optional int64 n = 1 [default=7]; +} + +message RepeatedEnum { + enum Color { + RED = 1; + } + repeated Color color = 1; +} + +message MoreRepeated { + repeated bool bools = 1; + repeated bool bools_packed = 2 [packed=true]; + repeated int32 ints = 3; + repeated int32 ints_packed = 4 [packed=true]; + repeated int64 int64s_packed = 7 [packed=true]; + repeated string strings = 5; + repeated fixed32 fixeds = 6; +} + +// GroupOld and GroupNew have the same wire format. +// GroupNew has a new field inside a group. + +message GroupOld { + optional group G = 101 { + optional int32 x = 2; + } +} + +message GroupNew { + optional group G = 101 { + optional int32 x = 2; + optional int32 y = 3; + } +} + +message FloatingPoint { + required double f = 1; +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text.go new file mode 100644 index 000000000..5063009c2 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text.go @@ -0,0 +1,701 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "fmt" + "io" + "log" + "math" + "os" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. 
+type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +// textMarshaler is implemented by Messages that can marshal themsleves. +// It is identical to encoding.TextMarshaler, introduced in go 1.2, +// which will eventually replace it. +type textMarshaler interface { + MarshalText() (text []byte, err error) +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Printf("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +var ( + messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem() +) + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func writeStruct(w *textWriter, sv reflect.Value) error { + if sv.Type() == messageSetType { + return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) + } + + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. 
+ continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + // Enums have a String method, so writeAny will work fine. + if err := writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if pv.Type().Implements(extendableProtoType) { + if err := writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. +func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Interface().([]byte))); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if tm, ok := v.Interface().(textMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. 
+// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeMessageSet(w *textWriter, ms *MessageSet) error { + for _, item := range ms.Item { + id := *item.TypeId + if msd, ok := messageSetMap[id]; ok { + // Known message set type. + if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { + return err + } + w.indent() + + pb := reflect.New(msd.t.Elem()) + if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { + if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { + return err + } + } else { + if err := writeStruct(w, pb.Elem()); err != nil { + return err + } + } + } else { + // Unknown type. + if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { + return err + } + w.indent() + if err := writeUnknownStruct(w, item.Message); err != nil { + return err + } + } + w.unindent() + if _, err := w.Write(gtNewline); err != nil { + return err + } + } + return nil +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else 
{ + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep := pv.Interface().(extendableProto) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m := ep.ExtensionMap() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil { + return err + } + continue + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +func marshalText(w io.Writer, pb Message, compact bool) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: compact, + } + + if tm, ok := pb.(textMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { + return marshalText(w, pb, false) +} + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, false) + return buf.String() +} + +// CompactText writes a given protocol buffer in compact text format (one line). 
+func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, true) + return buf.String() +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser.go new file mode 100644 index 000000000..d235ca9bc --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser.go @@ -0,0 +1,684 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// textUnmarshaler is implemented by Messages that can unmarshal themsleves. +// It is identical to encoding.TextUnmarshaler, introduced in go 1.2, +// which will eventually replace it. 
+type textUnmarshaler interface { + UnmarshalText(text []byte) error +} + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %v", p.s[0:i+1]) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. 
+ // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || p.s[0] != '"' { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +// Return an error indicating which required field was not set. 
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *ParseError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return p.errorf("message %v missing required field %q", st, props.OrigName) + } + } + return p.errorf("message %v missing required field", st) // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { + sprops := GetProperties(st) + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) *ParseError { + st := sv.Type() + reqCount := GetProperties(st).reqCount + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]". + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + tok = p.next() + if tok.err != nil { + return tok.err + } + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == tok.value { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", tok.value) + } + // Check the extension terminator. + tok = p.next() + if tok.err != nil { + return tok.err + } + if tok.value != "]" { + return p.errorf("unrecognized extension terminator %q", tok.value) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. 
+ var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + return err + } + ep := sv.Addr().Interface().(extendableProto) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + } else { + // This is a normal, non-extension field. + fi, props, ok := structFieldByName(st, tok.value) + if !ok { + return p.errorf("unknown field name %q in %v", tok.value, st) + } + + dst := sv.Field(fi) + isDstNil := isNil(dst) + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && !isDstNil { + return p.errorf("non-repeated field %q was repeated", tok.value) + } + + if err := p.checkForColon(props, st.Field(fi).Type); err != nil { + return err + } + + // Parse into the field. + if err := p.readAny(dst, props); err != nil { + return err + } + + if props.Required { + reqCount-- + } + } + + // For backward compatibility, permit a semicolon or comma after a field. + tok = p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. May already exist. + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(at, flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + case reflect.Bool: + // Either "true", "false", 1 or 0. + switch tok.value { + case "true", "1": + fv.SetBool(true) + return nil + case "false", "0": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". 
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement textUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(textUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser_test.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser_test.go new file mode 100644 index 000000000..b5292bc31 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser_test.go @@ -0,0 +1,462 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "math" + "reflect" + "testing" + + . "./testdata" + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +) + +type UnmarshalTextTest struct { + in string + err string // if "", no error expected + out *MyMessage +} + +func buildExtStructTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_More, &Ext{ + Data: String("Hello, world!"), + }) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtDataTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_Text, String("Hello, world!")) + SetExtension(msg, E_Ext_Number, Int32(1729)) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtRepStringTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { + panic(err) + } + return UnmarshalTextTest{in: text, out: msg} +} + +var unMarshalTextTests = []UnmarshalTextTest{ + // Basic + { + in: " count:42\n name:\"Dave\" ", + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + }, + }, + + // Empty quoted string + { + in: `count:42 name:""`, + out: &MyMessage{ + Count: Int32(42), + Name: String(""), + }, + }, + + // Quoted string concatenation + { + in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string with escaped apostrophe + { + in: `count:42 name: "HOLIDAY - New Year\'s Day"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("HOLIDAY - New Year's Day"), + }, + }, + + // Quoted string with single quote + { + in: `count:42 name: 'Roger "The Ramster" Ramjet'`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`Roger "The Ramster" Ramjet`), + }, + }, + + // Quoted string with all the accepted special characters from the C++ test + { + in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), + }, + }, + + // Quoted string with quoted backslash + { + in: `count:42 name: "\\'xyz"`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`\'xyz`), + }, + }, + + // Quoted string with UTF-8 bytes. 
+ { + in: "count:42 name: '\303\277\302\201\xAB'", + out: &MyMessage{ + Count: Int32(42), + Name: String("\303\277\302\201\xAB"), + }, + }, + + // Bad quoted string + { + in: `inner: < host: "\0" >` + "\n", + err: `line 1.15: invalid quoted string "\0"`, + }, + + // Number too large for int64 + { + in: "count: 1 others { key: 123456789012345678901 }", + err: "line 1.23: invalid int64: 123456789012345678901", + }, + + // Number too large for int32 + { + in: "count: 1234567890123", + err: "line 1.7: invalid int32: 1234567890123", + }, + + // Number in hexadecimal + { + in: "count: 0x2beef", + out: &MyMessage{ + Count: Int32(0x2beef), + }, + }, + + // Number in octal + { + in: "count: 024601", + out: &MyMessage{ + Count: Int32(024601), + }, + }, + + // Floating point number with "f" suffix + { + in: "count: 4 others:< weight: 17.0f >", + out: &MyMessage{ + Count: Int32(4), + Others: []*OtherMessage{ + { + Weight: Float32(17), + }, + }, + }, + }, + + // Floating point positive infinity + { + in: "count: 4 bigfloat: inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(1)), + }, + }, + + // Floating point negative infinity + { + in: "count: 4 bigfloat: -inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(-1)), + }, + }, + + // Number too large for float32 + { + in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", + err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", + }, + + // Number posing as a quoted string + { + in: `inner: < host: 12 >` + "\n", + err: `line 1.15: invalid string: 12`, + }, + + // Quoted string posing as int32 + { + in: `count: "12"`, + err: `line 1.7: invalid int32: "12"`, + }, + + // Quoted string posing a float32 + { + in: `others:< weight: "17.4" >`, + err: `line 1.17: invalid float32: "17.4"`, + }, + + // Enum + { + in: `count:42 bikeshed: BLUE`, + out: &MyMessage{ + Count: Int32(42), + Bikeshed: MyMessage_BLUE.Enum(), + }, + }, + + // Repeated field + { + in: `count:42 pet: "horsey" pet:"bunny"`, + out: &MyMessage{ + Count: Int32(42), + Pet: []string{"horsey", "bunny"}, + }, + }, + + // Repeated message with/without colon and <>/{} + { + in: `count:42 others:{} others{} others:<> others:{}`, + out: &MyMessage{ + Count: Int32(42), + Others: []*OtherMessage{ + {}, + {}, + {}, + {}, + }, + }, + }, + + // Missing colon for inner message + { + in: `count:42 inner < host: "cauchy.syd" >`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("cauchy.syd"), + }, + }, + }, + + // Missing colon for string field + { + in: `name "Dave"`, + err: `line 1.5: expected ':', found "\"Dave\""`, + }, + + // Missing colon for int32 field + { + in: `count 42`, + err: `line 1.6: expected ':', found "42"`, + }, + + // Missing required field + { + in: ``, + err: `line 1.0: message testdata.MyMessage missing required field "count"`, + }, + + // Repeated non-repeated field + { + in: `name: "Rob" name: "Russ"`, + err: `line 1.12: non-repeated field "name" was repeated`, + }, + + // Group + { + in: `count: 17 SomeGroup { group_field: 12 }`, + out: &MyMessage{ + Count: Int32(17), + Somegroup: &MyMessage_SomeGroup{ + GroupField: Int32(12), + }, + }, + }, + + // Semicolon between fields + { + in: `count:3;name:"Calvin"`, + out: &MyMessage{ + Count: Int32(3), + Name: String("Calvin"), + }, + }, + // Comma between fields + { + in: `count:4,name:"Ezekiel"`, + out: &MyMessage{ + Count: Int32(4), + Name: String("Ezekiel"), + }, + }, + + // Extension + buildExtStructTest(`count: 42 
[testdata.Ext.more]:`), + buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), + buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`), + buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), + + // Big all-in-one + { + in: "count:42 # Meaning\n" + + `name:"Dave" ` + + `quote:"\"I didn't want to go.\"" ` + + `pet:"bunny" ` + + `pet:"kitty" ` + + `pet:"horsey" ` + + `inner:<` + + ` host:"footrest.syd" ` + + ` port:7001 ` + + ` connected:true ` + + `> ` + + `others:<` + + ` key:3735928559 ` + + ` value:"\x01A\a\f" ` + + `> ` + + `others:<` + + " weight:58.9 # Atomic weight of Co\n" + + ` inner:<` + + ` host:"lesha.mtv" ` + + ` port:8002 ` + + ` >` + + `>`, + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + Quote: String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &InnerMessage{ + Host: String("footrest.syd"), + Port: Int32(7001), + Connected: Bool(true), + }, + Others: []*OtherMessage{ + { + Key: Int64(3735928559), + Value: []byte{0x1, 'A', '\a', '\f'}, + }, + { + Weight: Float32(58.9), + Inner: &InnerMessage{ + Host: String("lesha.mtv"), + Port: Int32(8002), + }, + }, + }, + }, + }, +} + +func TestUnmarshalText(t *testing.T) { + for i, test := range unMarshalTextTests { + pb := new(MyMessage) + err := UnmarshalText(test.in, pb) + if test.err == "" { + // We don't expect failure. + if err != nil { + t.Errorf("Test %d: Unexpected error: %v", i, err) + } else if !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } else { + // We do expect failure. + if err == nil { + t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) + } else if err.Error() != test.err { + t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", + i, err.Error(), test.err) + } + } + } +} + +func TestUnmarshalTextCustomMessage(t *testing.T) { + msg := &textMessage{} + if err := UnmarshalText("custom", msg); err != nil { + t.Errorf("Unexpected error from custom unmarshal: %v", err) + } + if UnmarshalText("not custom", msg) == nil { + t.Errorf("Didn't get expected error from custom unmarshal") + } +} + +// Regression test; this caused a panic. +func TestRepeatedEnum(t *testing.T) { + pb := new(RepeatedEnum) + if err := UnmarshalText("color: RED", pb); err != nil { + t.Fatal(err) + } + exp := &RepeatedEnum{ + Color: []RepeatedEnum_Color{RepeatedEnum_RED}, + } + if !Equal(pb, exp) { + t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp) + } +} + +var benchInput string + +func init() { + benchInput = "count: 4\n" + for i := 0; i < 1000; i++ { + benchInput += "pet: \"fido\"\n" + } + + // Check it is valid input. + pb := new(MyMessage) + err := UnmarshalText(benchInput, pb) + if err != nil { + panic("Bad benchmark input: " + err.Error()) + } +} + +func BenchmarkUnmarshalText(b *testing.B) { + pb := new(MyMessage) + for i := 0; i < b.N; i++ { + UnmarshalText(benchInput, pb) + } + b.SetBytes(int64(len(benchInput))) +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_test.go b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_test.go new file mode 100644 index 000000000..e88d20d4f --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_test.go @@ -0,0 +1,408 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// http://code.google.com/p/goprotobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "errors" + "io/ioutil" + "math" + "strings" + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + + pb "./testdata" +) + +// textMessage implements the methods that allow it to marshal and unmarshal +// itself as text. +type textMessage struct { +} + +func (*textMessage) MarshalText() ([]byte, error) { + return []byte("custom"), nil +} + +func (*textMessage) UnmarshalText(bytes []byte) error { + if string(bytes) != "custom" { + return errors.New("expected 'custom'") + } + return nil +} + +func (*textMessage) Reset() {} +func (*textMessage) String() string { return "" } +func (*textMessage) ProtoMessage() {} + +func newTestMessage() *pb.MyMessage { + msg := &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Quote: proto.String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("footrest.syd"), + Port: proto.Int32(7001), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(0xdeadbeef), + Value: []byte{1, 65, 7, 12}, + }, + { + Weight: proto.Float32(6.022), + Inner: &pb.InnerMessage{ + Host: proto.String("lesha.mtv"), + Port: proto.Int32(8002), + }, + }, + }, + Bikeshed: pb.MyMessage_BLUE.Enum(), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(8), + }, + // One normally wouldn't do this. + // This is an undeclared tag 13, as a varint (wire type 0) with value 4. + XXX_unrecognized: []byte{13<<3 | 0, 4}, + } + ext := &pb.Ext{ + Data: proto.String("Big gobs for big rats"), + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { + panic(err) + } + greetings := []string{"adg", "easy", "cow"} + if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { + panic(err) + } + + // Add an unknown extension. We marshal a pb.Ext, and fake the ID. 
+ b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) + if err != nil { + panic(err) + } + b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) + proto.SetRawExtension(msg, 201, b) + + // Extensions can be plain fields, too, so let's test that. + b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) + proto.SetRawExtension(msg, 202, b) + + return msg +} + +const text = `count: 42 +name: "Dave" +quote: "\"I didn't want to go.\"" +pet: "bunny" +pet: "kitty" +pet: "horsey" +inner: < + host: "footrest.syd" + port: 7001 + connected: true +> +others: < + key: 3735928559 + value: "\001A\007\014" +> +others: < + weight: 6.022 + inner: < + host: "lesha.mtv" + port: 8002 + > +> +bikeshed: BLUE +SomeGroup { + group_field: 8 +} +/* 2 unknown bytes */ +13: 4 +[testdata.Ext.more]: < + data: "Big gobs for big rats" +> +[testdata.greeting]: "adg" +[testdata.greeting]: "easy" +[testdata.greeting]: "cow" +/* 13 unknown bytes */ +201: "\t3G skiing" +/* 3 unknown bytes */ +202: 19 +` + +func TestMarshalText(t *testing.T) { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, newTestMessage()); err != nil { + t.Fatalf("proto.MarshalText: %v", err) + } + s := buf.String() + if s != text { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) + } +} + +func TestMarshalTextCustomMessage(t *testing.T) { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, &textMessage{}); err != nil { + t.Fatalf("proto.MarshalText: %v", err) + } + s := buf.String() + if s != "custom" { + t.Errorf("Got %q, expected %q", s, "custom") + } +} +func TestMarshalTextNil(t *testing.T) { + want := "" + tests := []proto.Message{nil, (*pb.MyMessage)(nil)} + for i, test := range tests { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, test); err != nil { + t.Fatal(err) + } + if got := buf.String(); got != want { + t.Errorf("%d: got %q want %q", i, got, want) + } + } +} + +func TestMarshalTextUnknownEnum(t *testing.T) { + // The Color enum only specifies values 0-2. 
+ m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} + got := m.String() + const want = `bikeshed:3 ` + if got != want { + t.Errorf("\n got %q\nwant %q", got, want) + } +} + +func BenchmarkMarshalTextBuffered(b *testing.B) { + buf := new(bytes.Buffer) + m := newTestMessage() + for i := 0; i < b.N; i++ { + buf.Reset() + proto.MarshalText(buf, m) + } +} + +func BenchmarkMarshalTextUnbuffered(b *testing.B) { + w := ioutil.Discard + m := newTestMessage() + for i := 0; i < b.N; i++ { + proto.MarshalText(w, m) + } +} + +func compact(src string) string { + // s/[ \n]+/ /g; s/ $//; + dst := make([]byte, len(src)) + space, comment := false, false + j := 0 + for i := 0; i < len(src); i++ { + if strings.HasPrefix(src[i:], "/*") { + comment = true + i++ + continue + } + if comment && strings.HasPrefix(src[i:], "*/") { + comment = false + i++ + continue + } + if comment { + continue + } + c := src[i] + if c == ' ' || c == '\n' { + space = true + continue + } + if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { + space = false + } + if c == '{' { + space = false + } + if space { + dst[j] = ' ' + j++ + space = false + } + dst[j] = c + j++ + } + if space { + dst[j] = ' ' + j++ + } + return string(dst[0:j]) +} + +var compactText = compact(text) + +func TestCompactText(t *testing.T) { + s := proto.CompactTextString(newTestMessage()) + if s != compactText { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) + } +} + +func TestStringEscaping(t *testing.T) { + testCases := []struct { + in *pb.Strings + out string + }{ + { + // Test data from C++ test (TextFormatTest.StringEscape). + // Single divergence: we don't escape apostrophes. + &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, + "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", + }, + { + // Test data from the same C++ test. + &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, + "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", + }, + { + // Some UTF-8. + &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, + `string_field: "\000\001\377\201"` + "\n", + }, + } + + for i, tc := range testCases { + var buf bytes.Buffer + if err := proto.MarshalText(&buf, tc.in); err != nil { + t.Errorf("proto.MarsalText: %v", err) + continue + } + s := buf.String() + if s != tc.out { + t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) + continue + } + + // Check round-trip. + pb := new(pb.Strings) + if err := proto.UnmarshalText(s, pb); err != nil { + t.Errorf("#%d: UnmarshalText: %v", i, err) + continue + } + if !proto.Equal(pb, tc.in) { + t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) + } + } +} + +// A limitedWriter accepts some output before it fails. +// This is a proxy for something like a nearly-full or imminently-failing disk, +// or a network connection that is about to die. +type limitedWriter struct { + b bytes.Buffer + limit int +} + +var outOfSpace = errors.New("proto: insufficient space") + +func (w *limitedWriter) Write(p []byte) (n int, err error) { + var avail = w.limit - w.b.Len() + if avail <= 0 { + return 0, outOfSpace + } + if len(p) <= avail { + return w.b.Write(p) + } + n, _ = w.b.Write(p[:avail]) + return n, outOfSpace +} + +func TestMarshalTextFailing(t *testing.T) { + // Try lots of different sizes to exercise more error code-paths. 
+ for lim := 0; lim < len(text); lim++ { + buf := new(limitedWriter) + buf.limit = lim + err := proto.MarshalText(buf, newTestMessage()) + // We expect a certain error, but also some partial results in the buffer. + if err != outOfSpace { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace) + } + s := buf.b.String() + x := text[:buf.limit] + if s != x { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x) + } + } +} + +func TestFloats(t *testing.T) { + tests := []struct { + f float64 + want string + }{ + {0, "0"}, + {4.7, "4.7"}, + {math.Inf(1), "inf"}, + {math.Inf(-1), "-inf"}, + {math.NaN(), "nan"}, + } + for _, test := range tests { + msg := &pb.FloatingPoint{F: &test.f} + got := strings.TrimSpace(msg.String()) + want := `f:` + test.want + if got != want { + t.Errorf("f=%f: got %q, want %q", test.f, got, want) + } + } +} + +func TestRepeatedNilText(t *testing.T) { + m := &pb.MessageList{ + Message: []*pb.MessageList_Message{ + nil, + &pb.MessageList_Message{ + Name: proto.String("Horse"), + }, + nil, + }, + } + want := `Message +Message { + name: "Horse" +} +Message +` + if s := proto.MarshalTextString(m); s != want { + t.Errorf(" got: %s\nwant: %s", s, want) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go new file mode 100644 index 000000000..d93c1b9db --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go @@ -0,0 +1,124 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" +) + +// ErrCorrupt reports that the input is invalid. +var ErrCorrupt = errors.New("snappy: corrupt input") + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n == 0 { + return 0, 0, ErrCorrupt + } + if uint64(int(v)) != v { + return 0, 0, errors.New("snappy: decoded block is too large") + } + return int(v), n, nil +} + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil dst. 
+func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if len(dst) < dLen { + dst = make([]byte, dLen) + } + + var d, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint(src[s] >> 2) + switch { + case x < 60: + s += 1 + case x == 60: + s += 2 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-1]) + case x == 61: + s += 3 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-2]) | uint(src[s-1])<<8 + case x == 62: + s += 4 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 + case x == 63: + s += 5 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 + } + length = int(x + 1) + if length <= 0 { + return nil, errors.New("snappy: unsupported literal length") + } + if length > len(dst)-d || length > len(src)-s { + return nil, ErrCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if s > len(src) { + return nil, ErrCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) + + case tagCopy2: + s += 3 + if s > len(src) { + return nil, ErrCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(src[s-2]) | int(src[s-1])<<8 + + case tagCopy4: + return nil, errors.New("snappy: unsupported COPY_4 tag") + } + + end := d + length + if offset > d || end > len(dst) { + return nil, ErrCorrupt + } + for ; d < end; d++ { + dst[d] = dst[d-offset] + } + } + if d != dLen { + return nil, ErrCorrupt + } + return dst[:d], nil +} diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go new file mode 100644 index 000000000..b2371db11 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go @@ -0,0 +1,174 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" +) + +// We limit how far copy back-references can go, the same as the C++ code. +const maxOffset = 1 << 15 + +// emitLiteral writes a literal chunk and returns the number of bytes written. +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + case n < 1<<16: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + case n < 1<<24: + dst[0] = 62<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + dst[3] = uint8(n >> 16) + i = 4 + case int64(n) < 1<<32: + dst[0] = 63<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + dst[3] = uint8(n >> 16) + dst[4] = uint8(n >> 24) + i = 5 + default: + panic("snappy: source buffer is too long") + } + if copy(dst[i:], lit) != len(lit) { + panic("snappy: destination buffer is too short") + } + return i + len(lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. 
+func emitCopy(dst []byte, offset, length int) int { + i := 0 + for length > 0 { + x := length - 4 + if 0 <= x && x < 1<<3 && offset < 1<<11 { + dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + i += 2 + break + } + + x = length + if x > 1<<6 { + x = 1 << 6 + } + dst[i+0] = uint8(x-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= x + } + return i +} + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil dst. +func Encode(dst, src []byte) ([]byte, error) { + if n := MaxEncodedLen(len(src)); len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + // Return early if src is short. + if len(src) <= 4 { + if len(src) != 0 { + d += emitLiteral(dst[d:], src) + } + return dst[:d], nil + } + + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + const maxTableSize = 1 << 14 + shift, tableSize := uint(32-8), 1<<8 + for tableSize < maxTableSize && tableSize < len(src) { + shift-- + tableSize *= 2 + } + var table [maxTableSize]int + + // Iterate over the source bytes. + var ( + s int // The iterator position. + t int // The last position with the same hash as s. + lit int // The start position of any pending literal bytes. + ) + for s+3 < len(src) { + // Update the hash table. + b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] + h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 + p := &table[(h*0x1e35a7bd)>>shift] + // We need to to store values in [-1, inf) in table. To save + // some initialization time, (re)use the table's zero value + // and shift the values against this zero: add 1 on writes, + // subtract 1 on reads. + t, *p = *p-1, s+1 + // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. + if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { + s++ + continue + } + // Otherwise, we have a match. First, emit any pending literal bytes. + if lit != s { + d += emitLiteral(dst[d:], src[lit:s]) + } + // Extend the match to be as long as possible. + s0 := s + s, t = s+4, t+4 + for s < len(src) && src[s] == src[t] { + s++ + t++ + } + // Emit the copied bytes. + d += emitCopy(dst[d:], s-t, s-s0) + lit = s + } + + // Emit any final pending literal bytes and return. + if lit != len(src) { + d += emitLiteral(dst[d:], src[lit:]) + } + return dst[:d], nil +} + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +func MaxEncodedLen(srcLen int) int { + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. 
If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + return 32 + srcLen + srcLen/6 +} diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go new file mode 100644 index 000000000..2f1b790d0 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go @@ -0,0 +1,38 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the snappy block-based compression format. +// It aims for very high speeds and reasonable compression. +// +// The C++ snappy implementation is at http://code.google.com/p/snappy/ +package snappy + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer supported. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go new file mode 100644 index 000000000..7ba839244 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go @@ -0,0 +1,261 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package snappy + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "path/filepath" + "strings" + "testing" +) + +var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") + +func roundtrip(b, ebuf, dbuf []byte) error { + e, err := Encode(ebuf, b) + if err != nil { + return fmt.Errorf("encoding error: %v", err) + } + d, err := Decode(dbuf, e) + if err != nil { + return fmt.Errorf("decoding error: %v", err) + } + if !bytes.Equal(b, d) { + return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d) + } + return nil +} + +func TestEmpty(t *testing.T) { + if err := roundtrip(nil, nil, nil); err != nil { + t.Fatal(err) + } +} + +func TestSmallCopy(t *testing.T) { + for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for i := 0; i < 32; i++ { + s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" + if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { + t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) + } + } + } + } +} + +func TestSmallRand(t *testing.T) { + rand.Seed(27354294) + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i, _ := range b { + b[i] = uint8(rand.Uint32()) + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestSmallRegular(t *testing.T) { + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i, _ := range b { + b[i] = uint8(i%10 + 'a') + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func benchDecode(b *testing.B, src []byte) { + encoded, err := Encode(nil, src) + if err != nil { + b.Fatal(err) + } + // Bandwidth is in amount of uncompressed data. + b.SetBytes(int64(len(src))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Decode(src, encoded) + } +} + +func benchEncode(b *testing.B, src []byte) { + // Bandwidth is in amount of uncompressed data. + b.SetBytes(int64(len(src))) + dst := make([]byte, MaxEncodedLen(len(src))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Encode(dst, src) + } +} + +func readFile(b *testing.B, filename string) []byte { + src, err := ioutil.ReadFile(filename) + if err != nil { + b.Fatalf("failed reading %s: %s", filename, err) + } + if len(src) == 0 { + b.Fatalf("%s has zero length", filename) + } + return src +} + +// expand returns a slice of length n containing repeated copies of src. +func expand(src []byte, n int) []byte { + dst := make([]byte, n) + for x := dst; len(x) > 0; { + i := copy(x, src) + x = x[i:] + } + return dst +} + +func benchWords(b *testing.B, n int, decode bool) { + // Note: the file is OS-language dependent so the resulting values are not + // directly comparable for non-US-English OS installations. 
+ data := expand(readFile(b, "/usr/share/dict/words"), n) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } +func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } +func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } +func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } +func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } +func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } +func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } +func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } + +// testFiles' values are copied directly from +// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc. +// The label field is unused in snappy-go. +var testFiles = []struct { + label string + filename string +}{ + {"html", "html"}, + {"urls", "urls.10K"}, + {"jpg", "house.jpg"}, + {"pdf", "mapreduce-osdi-1.pdf"}, + {"html4", "html_x_4"}, + {"cp", "cp.html"}, + {"c", "fields.c"}, + {"lsp", "grammar.lsp"}, + {"xls", "kennedy.xls"}, + {"txt1", "alice29.txt"}, + {"txt2", "asyoulik.txt"}, + {"txt3", "lcet10.txt"}, + {"txt4", "plrabn12.txt"}, + {"bin", "ptt5"}, + {"sum", "sum"}, + {"man", "xargs.1"}, + {"pb", "geo.protodata"}, + {"gaviota", "kppkn.gtb"}, +} + +// The test data files are present at this canonical URL. +const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/" + +func downloadTestdata(basename string) (errRet error) { + filename := filepath.Join("testdata", basename) + f, err := os.Create(filename) + if err != nil { + return fmt.Errorf("failed to create %s: %s", filename, err) + } + defer f.Close() + defer func() { + if errRet != nil { + os.Remove(filename) + } + }() + resp, err := http.Get(baseURL + basename) + if err != nil { + return fmt.Errorf("failed to download %s: %s", baseURL+basename, err) + } + defer resp.Body.Close() + _, err = io.Copy(f, resp.Body) + if err != nil { + return fmt.Errorf("failed to write %s: %s", filename, err) + } + return nil +} + +func benchFile(b *testing.B, n int, decode bool) { + filename := filepath.Join("testdata", testFiles[n].filename) + if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 { + if !*download { + b.Fatal("test data not found; skipping benchmark without the -download flag") + } + // Download the official snappy C++ implementation reference test data + // files for benchmarking. + if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) { + b.Fatalf("failed to create testdata: %s", err) + } + for _, tf := range testFiles { + if err := downloadTestdata(tf.filename); err != nil { + b.Fatalf("failed to download testdata: %s", err) + } + } + } + data := readFile(b, filename) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +// Naming convention is kept similar to what snappy's C++ implementation uses. 
+func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } +func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } +func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } +func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } +func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } +func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } +func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } +func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } +func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } +func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } +func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } +func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } +func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) } +func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) } +func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) } +func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) } +func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) } +func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) } +func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } +func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } +func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } +func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } +func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } +func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } +func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } +func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } +func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } +func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } +func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } +func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } +func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) } +func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) } +func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) } +func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) } +func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) } +func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) } diff --git a/Godeps/_workspace/src/github.com/gonuts/flag/LICENSE b/Godeps/_workspace/src/github.com/gonuts/flag/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gonuts/flag/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gonuts/flag/README.md b/Godeps/_workspace/src/github.com/gonuts/flag/README.md new file mode 100644 index 000000000..06b7a02be --- /dev/null +++ b/Godeps/_workspace/src/github.com/gonuts/flag/README.md @@ -0,0 +1,6 @@ +flag +======= + +[![Build Status](https://drone.io/github.com/gonuts/flag/status.png)](https://drone.io/github.com/gonuts/flag/latest) + +A fork of the official "flag" package but with the flag.Value interface extended to provide a ``Get() interface{}`` method. diff --git a/Godeps/_workspace/src/github.com/gonuts/flag/example_test.go b/Godeps/_workspace/src/github.com/gonuts/flag/example_test.go new file mode 100644 index 000000000..04a0d20ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/gonuts/flag/example_test.go @@ -0,0 +1,83 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// These examples demonstrate more intricate uses of the flag package. +package flag_test + +import ( + "errors" + "flag" + "fmt" + "strings" + "time" +) + +// Example 1: A single string flag called "species" with default value "gopher". +var species = flag.String("species", "gopher", "the species we are studying") + +// Example 2: Two flags sharing a variable, so we can have a shorthand. +// The order of initialization is undefined, so make sure both use the +// same default value. They must be set up with an init function. +var gopherType string + +func init() { + const ( + defaultGopher = "pocket" + usage = "the variety of gopher" + ) + flag.StringVar(&gopherType, "gopher_type", defaultGopher, usage) + flag.StringVar(&gopherType, "g", defaultGopher, usage+" (shorthand)") +} + +// Example 3: A user-defined flag type, a slice of durations. +type interval []time.Duration + +// String is the method to format the flag's value, part of the flag.Value interface. +// The String method's output will be used in diagnostics. +func (i *interval) String() string { + return fmt.Sprint(*i) +} + +// Set is the method to set the flag value, part of the flag.Value interface. +// Set's argument is a string to be parsed to set the flag. +// It's a comma-separated list, so we split it. +func (i *interval) Set(value string) error { + // If we wanted to allow the flag to be set multiple times, + // accumulating values, we would delete this if statement. + // That would permit usages such as + // -deltaT 10s -deltaT 15s + // and other combinations. 
+ if len(*i) > 0 { + return errors.New("interval flag already set") + } + for _, dt := range strings.Split(value, ",") { + duration, err := time.ParseDuration(dt) + if err != nil { + return err + } + *i = append(*i, duration) + } + return nil +} + +// Define a flag to accumulate durations. Because it has a special type, +// we need to use the Var function and therefore create the flag during +// init. + +var intervalFlag interval + +func init() { + // Tie the command-line flag to the intervalFlag variable and + // set a usage message. + flag.Var(&intervalFlag, "deltaT", "comma-separated list of intervals to use between events") +} + +func Example() { + // All the interesting pieces are with the variables declared above, but + // to enable the flag package to see the flags defined there, one must + // execute, typically at the start of main (not init!): + // flag.Parse() + // We don't run it here because this is not a main function and + // the testing suite has already parsed the flags. +} diff --git a/Godeps/_workspace/src/github.com/gonuts/flag/export_test.go b/Godeps/_workspace/src/github.com/gonuts/flag/export_test.go new file mode 100644 index 000000000..7b190807a --- /dev/null +++ b/Godeps/_workspace/src/github.com/gonuts/flag/export_test.go @@ -0,0 +1,22 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flag + +import "os" + +// Additional routines compiled into the package only during testing. + +// ResetForTesting clears all flag state and sets the usage function as directed. +// After calling ResetForTesting, parse errors in flag handling will not +// exit the program. +func ResetForTesting(usage func()) { + commandLine = NewFlagSet(os.Args[0], ContinueOnError) + Usage = usage +} + +// CommandLine returns the default FlagSet. +func CommandLine() *FlagSet { + return commandLine +} diff --git a/Godeps/_workspace/src/github.com/gonuts/flag/flag.go b/Godeps/_workspace/src/github.com/gonuts/flag/flag.go new file mode 100644 index 000000000..1e939e9ef --- /dev/null +++ b/Godeps/_workspace/src/github.com/gonuts/flag/flag.go @@ -0,0 +1,816 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + Package flag implements command-line flag parsing. + + Usage: + + Define flags using flag.String(), Bool(), Int(), etc. + + This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + import "flag" + var ip = flag.Int("flagname", 1234, "help message for flagname") + If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int + func init() { + flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") + } + Or you can create custom flags that satisfy the Value interface (with + pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") + For such flags, the default value is just the initial value of the variable. + + After all flags are defined, call + flag.Parse() + to parse the command line into the defined flags. + + Flags may then be used directly. If you're using the flags themselves, + they are all pointers; if you bind to variables, they're values. 
+ fmt.Println("ip has value ", *ip) + fmt.Println("flagvar has value ", flagvar) + + After parsing, the arguments after the flag are available as the + slice flag.Args() or individually as flag.Arg(i). + The arguments are indexed from 0 up to flag.NArg(). + + Command line flag syntax: + -flag + -flag=x + -flag x // non-boolean flags only + One or two minus signs may be used; they are equivalent. + The last form is not permitted for boolean flags because the + meaning of the command + cmd -x * + will change if there is a file called 0, false, etc. You must + use the -flag=false form to turn off a boolean flag. + + Flag parsing stops just before the first non-flag argument + ("-" is a non-flag argument) or after the terminator "--". + + Integer flags accept 1234, 0664, 0x1234 and may be negative. + Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False. + Duration flags accept any input valid for time.ParseDuration. + + The default set of command-line flags is controlled by + top-level functions. The FlagSet type allows one to define + independent sets of flags, such as to implement subcommands + in a command-line interface. The methods of FlagSet are + analogous to the top-level functions for the command-line + flag set. +*/ +package flag + +import ( + "errors" + "fmt" + "io" + "os" + "sort" + "strconv" + "time" +) + +// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. +var ErrHelp = errors.New("flag: help requested") + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Get() interface{} { return bool(*b) } + +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Get() interface{} { return int(*i) } + +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Get() interface{} { return int64(*i) } + +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Get() interface{} { return uint(*i) } + +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Get() interface{} { return uint64(*i) } + +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue 
{ + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} + +func (s *stringValue) Get() interface{} { return s.String() } + +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Get() interface{} { return float64(*f) } + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Get() interface{} { return *(*time.Duration)(d) } + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +type Value interface { + String() string + Set(string) error + Get() interface{} +} + +// ErrorHandling defines how to handle flag parsing errors. +type ErrorHandling int + +const ( + ContinueOnError ErrorHandling = iota + ExitOnError + PanicOnError +) + +// A FlagSet represents a set of defined flags. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. + Usage func() + + name string + parsed bool + actual map[string]*Flag + formal map[string]*Flag + args []string // arguments after flags + exitOnError bool // does the program exit if there's an error? + errorHandling ErrorHandling + output io.Writer // nil means stderr; use out() accessor +} + +// A Flag represents the state of a flag. +type Flag struct { + Name string // name as it appears on command line + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message +} + +// sortFlags returns the flags as a slice in lexicographical sorted order. +func sortFlags(flags map[string]*Flag) []*Flag { + list := make(sort.StringSlice, len(flags)) + i := 0 + for _, f := range flags { + list[i] = f.Name + i++ + } + list.Sort() + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[name] + } + return result +} + +func (f *FlagSet) out() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { + f.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. +func (f *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(f.formal) { + fn(flag) + } +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + commandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order, calling fn for each. +// It visits only those flags that have been set. 
+func (f *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(f.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + commandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) Lookup(name string) *Flag { + return f.formal[name] +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return commandLine.formal[name] +} + +// Set sets the value of the named flag. +func (f *FlagSet) Set(name, value string) error { + flag, ok := f.formal[name] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + err := flag.Value.Set(value) + if err != nil { + return err + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + return nil +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return commandLine.Set(name, value) +} + +// PrintDefaults prints, to standard error unless configured +// otherwise, the default values of all defined flags in the set. +func (f *FlagSet) PrintDefaults() { + f.VisitAll(func(flag *Flag) { + format := " -%s=%s: %s\n" + if _, ok := flag.Value.(*stringValue); ok { + // put quotes on the value + format = " -%s=%q: %s\n" + } + fmt.Fprintf(f.out(), format, flag.Name, flag.DefValue, flag.Usage) + }) +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + commandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(f *FlagSet) { + fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(commandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(commandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. +func Arg(i int) string { + return commandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(commandLine.args) } + +// Args returns the non-flag arguments. +func (f *FlagSet) Args() []string { return f.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return commandLine.args } + +// BoolVar defines a bool flag with specified name, default value, and usage string. 
+// The argument p points to a bool variable in which to store the value of the flag. +func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { + f.Var(newBoolValue(value, p), name, usage) +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, name string, value bool, usage string) { + commandLine.Var(newBoolValue(value, p), name, usage) +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(name string, value bool, usage string) *bool { + p := new(bool) + f.BoolVar(p, name, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(name string, value bool, usage string) *bool { + return commandLine.Bool(name, value, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { + f.Var(newIntValue(value, p), name, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, name string, value int, usage string) { + commandLine.Var(newIntValue(value, p), name, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (f *FlagSet) Int(name string, value int, usage string) *int { + p := new(int) + f.IntVar(p, name, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(name string, value int, usage string) *int { + return commandLine.Int(name, value, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { + f.Var(newInt64Value(value, p), name, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, name string, value int64, usage string) { + commandLine.Var(newInt64Value(value, p), name, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { + p := new(int64) + f.Int64Var(p, name, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. 
+func Int64(name string, value int64, usage string) *int64 { + return commandLine.Int64(name, value, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { + f.Var(newUintValue(value, p), name, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, name string, value uint, usage string) { + commandLine.Var(newUintValue(value, p), name, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(name string, value uint, usage string) *uint { + p := new(uint) + f.UintVar(p, name, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(name string, value uint, usage string) *uint { + return commandLine.Uint(name, value, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { + f.Var(newUint64Value(value, p), name, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, name string, value uint64, usage string) { + commandLine.Var(newUint64Value(value, p), name, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64Var(p, name, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(name string, value uint64, usage string) *uint64 { + return commandLine.Uint64(name, value, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { + f.Var(newStringValue(value, p), name, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, name string, value string, usage string) { + commandLine.Var(newStringValue(value, p), name, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. 
+func (f *FlagSet) String(name string, value string, usage string) *string { + p := new(string) + f.StringVar(p, name, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(name string, value string, usage string) *string { + return commandLine.String(name, value, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { + f.Var(newFloat64Value(value, p), name, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, name string, value float64, usage string) { + commandLine.Var(newFloat64Value(value, p), name, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { + p := new(float64) + f.Float64Var(p, name, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(name string, value float64, usage string) *float64 { + return commandLine.Float64(name, value, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + f.Var(newDurationValue(value, p), name, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + commandLine.Var(newDurationValue(value, p), name, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVar(p, name, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(name string, value time.Duration, usage string) *time.Duration { + return commandLine.Duration(name, value, usage) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. 
For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, name string, usage string) { + // Remember the default value as a string; it won't change. + flag := &Flag{name, usage, value, value.String()} + _, alreadythere := f.formal[name] + if alreadythere { + msg := fmt.Sprintf("%s flag redefined: %s", f.name, name) + fmt.Fprintln(f.out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[string]*Flag) + } + f.formal[name] = flag +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func Var(value Value, name string, usage string) { + commandLine.Var(value, name, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(f.out(), err) + f.usage() + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is commandLine. +func (f *FlagSet) usage() { + if f == commandLine { + Usage() + } else if f.Usage == nil { + defaultUsage(f) + } else { + f.Usage() + } +} + +// parseOne parses one flag. It returns whether a flag was seen. +func (f *FlagSet) parseOne() (bool, error) { + if len(f.args) == 0 { + return false, nil + } + s := f.args[0] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + return false, nil + } + num_minuses := 1 + if s[1] == '-' { + num_minuses++ + if len(s) == 2 { // "--" terminates the flags + f.args = f.args[1:] + return false, nil + } + } + name := s[num_minuses:] + if len(name) == 0 || name[0] == '-' || name[0] == '=' { + return false, f.failf("bad flag syntax: %s", s) + } + + // it's a flag. does it have an argument? + f.args = f.args[1:] + has_value := false + value := "" + for i := 1; i < len(name); i++ { // equals cannot be first + if name[i] == '=' { + value = name[i+1:] + has_value = true + name = name[0:i] + break + } + } + m := f.formal + flag, alreadythere := m[name] // BUG + if !alreadythere { + if name == "help" || name == "h" { // special case for nice help message. + f.usage() + return false, ErrHelp + } + return false, f.failf("flag provided but not defined: -%s", name) + } + if fv, ok := flag.Value.(*boolValue); ok { // special case: doesn't need an arg + if has_value { + if err := fv.Set(value); err != nil { + f.failf("invalid boolean value %q for -%s: %v", value, name, err) + } + } else { + fv.Set("true") + } + } else { + // It must have a value, which might be the next argument. 
+ if !has_value && len(f.args) > 0 { + // value is the next arg + has_value = true + value, f.args = f.args[0], f.args[1:] + } + if !has_value { + return false, f.failf("flag needs an argument: -%s", name) + } + if err := flag.Value.Set(value); err != nil { + return false, f.failf("invalid value %q for flag -%s: %v", value, name, err) + } + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + return true, nil +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help was set but not defined. +func (f *FlagSet) Parse(arguments []string) error { + f.parsed = true + f.args = arguments + for { + seen, err := f.parseOne() + if seen { + continue + } + if err == nil { + break + } + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + return nil +} + +// Parsed reports whether f.Parse has been called. +func (f *FlagSet) Parsed() bool { + return f.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. +func Parse() { + // Ignore errors; commandLine is set for ExitOnError. + commandLine.Parse(os.Args[1:]) +} + +// Parsed returns true if the command-line flags have been parsed. +func Parsed() bool { + return commandLine.Parsed() +} + +// The default set of command-line flags, parsed from os.Args. +var commandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. +func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + } + return f +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name and the +// ContinueOnError error handling policy. +func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { + f.name = name + f.errorHandling = errorHandling +} diff --git a/Godeps/_workspace/src/github.com/gonuts/flag/flag_test.go b/Godeps/_workspace/src/github.com/gonuts/flag/flag_test.go new file mode 100644 index 000000000..bf1987cd7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gonuts/flag/flag_test.go @@ -0,0 +1,288 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flag_test + +import ( + "bytes" + "fmt" + "os" + "sort" + "strings" + "testing" + "time" + + . 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" +) + +var ( + test_bool = Bool("test_bool", false, "bool value") + test_int = Int("test_int", 0, "int value") + test_int64 = Int64("test_int64", 0, "int64 value") + test_uint = Uint("test_uint", 0, "uint value") + test_uint64 = Uint64("test_uint64", 0, "uint64 value") + test_string = String("test_string", "0", "string value") + test_float64 = Float64("test_float64", 0, "float64 value") + test_duration = Duration("test_duration", 0, "time.Duration value") +) + +func boolString(s string) string { + if s == "0" { + return "false" + } + return "true" +} + +func TestEverything(t *testing.T) { + m := make(map[string]*Flag) + desired := "0" + visitor := func(f *Flag) { + if len(f.Name) > 5 && f.Name[0:5] == "test_" { + m[f.Name] = f + ok := false + switch { + case f.Value.String() == desired: + ok = true + case f.Name == "test_bool" && f.Value.String() == boolString(desired): + ok = true + case f.Name == "test_duration" && f.Value.String() == desired+"s": + ok = true + } + if !ok { + t.Error("Visit: bad value", f.Value.String(), "for", f.Name) + } + } + } + VisitAll(visitor) + if len(m) != 8 { + t.Error("VisitAll misses some flags") + for k, v := range m { + t.Log(k, *v) + } + } + m = make(map[string]*Flag) + Visit(visitor) + if len(m) != 0 { + t.Errorf("Visit sees unset flags") + for k, v := range m { + t.Log(k, *v) + } + } + // Now set all flags + Set("test_bool", "true") + Set("test_int", "1") + Set("test_int64", "1") + Set("test_uint", "1") + Set("test_uint64", "1") + Set("test_string", "1") + Set("test_float64", "1") + Set("test_duration", "1s") + desired = "1" + Visit(visitor) + if len(m) != 8 { + t.Error("Visit fails after set") + for k, v := range m { + t.Log(k, *v) + } + } + // Now test they're visited in sort order. 
+ var flagNames []string + Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) }) + if !sort.StringsAreSorted(flagNames) { + t.Errorf("flag names not sorted: %v", flagNames) + } +} + +func TestUsage(t *testing.T) { + called := false + ResetForTesting(func() { called = true }) + if CommandLine().Parse([]string{"-x"}) == nil { + t.Error("parse did not fail for unknown flag") + } + if !called { + t.Error("did not call Usage for unknown flag") + } +} + +func testParse(f *FlagSet, t *testing.T) { + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + boolFlag := f.Bool("bool", false, "bool value") + bool2Flag := f.Bool("bool2", false, "bool2 value") + intFlag := f.Int("int", 0, "int value") + int64Flag := f.Int64("int64", 0, "int64 value") + uintFlag := f.Uint("uint", 0, "uint value") + uint64Flag := f.Uint64("uint64", 0, "uint64 value") + stringFlag := f.String("string", "0", "string value") + float64Flag := f.Float64("float64", 0, "float64 value") + durationFlag := f.Duration("duration", 5*time.Second, "time.Duration value") + extra := "one-extra-argument" + args := []string{ + "-bool", + "-bool2=true", + "--int", "22", + "--int64", "0x23", + "-uint", "24", + "--uint64", "25", + "-string", "hello", + "-float64", "2718e28", + "-duration", "2m", + extra, + } + if err := f.Parse(args); err != nil { + t.Fatal(err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolFlag != true { + t.Error("bool flag should be true, is ", *boolFlag) + } + if *bool2Flag != true { + t.Error("bool2 flag should be true, is ", *bool2Flag) + } + if *intFlag != 22 { + t.Error("int flag should be 22, is ", *intFlag) + } + if *int64Flag != 0x23 { + t.Error("int64 flag should be 0x23, is ", *int64Flag) + } + if *uintFlag != 24 { + t.Error("uint flag should be 24, is ", *uintFlag) + } + if *uint64Flag != 25 { + t.Error("uint64 flag should be 25, is ", *uint64Flag) + } + if *stringFlag != "hello" { + t.Error("string flag should be `hello`, is ", *stringFlag) + } + if *float64Flag != 2718e28 { + t.Error("float64 flag should be 2718e28, is ", *float64Flag) + } + if *durationFlag != 2*time.Minute { + t.Error("duration flag should be 2m, is ", *durationFlag) + } + if len(f.Args()) != 1 { + t.Error("expected one argument, got", len(f.Args())) + } else if f.Args()[0] != extra { + t.Errorf("expected argument %q got %q", extra, f.Args()[0]) + } +} + +func TestParse(t *testing.T) { + ResetForTesting(func() { t.Error("bad parse") }) + testParse(CommandLine(), t) +} + +func TestFlagSetParse(t *testing.T) { + testParse(NewFlagSet("test", ContinueOnError), t) +} + +// Declare a user-defined flag type. 
+type flagVar []string + +func (f *flagVar) String() string { + return fmt.Sprint([]string(*f)) +} + +func (f *flagVar) Set(value string) error { + *f = append(*f, value) + return nil +} + +func (f *flagVar) Get() interface{} { return []string(*f) } + +func TestUserDefined(t *testing.T) { + var flags FlagSet + flags.Init("test", ContinueOnError) + var v flagVar + flags.Var(&v, "v", "usage") + if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil { + t.Error(err) + } + if len(v) != 3 { + t.Fatal("expected 3 args; got ", len(v)) + } + expect := "[1 2 3]" + if v.String() != expect { + t.Errorf("expected value %q got %q", expect, v.String()) + } +} + +func TestSetOutput(t *testing.T) { + var flags FlagSet + var buf bytes.Buffer + flags.SetOutput(&buf) + flags.Init("test", ContinueOnError) + flags.Parse([]string{"-unknown"}) + if out := buf.String(); !strings.Contains(out, "-unknown") { + t.Logf("expected output mentioning unknown; got %q", out) + } +} + +// This tests that one can reset the flags. This still works but not well, and is +// superseded by FlagSet. +func TestChangingArgs(t *testing.T) { + ResetForTesting(func() { t.Fatal("bad parse") }) + oldArgs := os.Args + defer func() { os.Args = oldArgs }() + os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"} + before := Bool("before", false, "") + if err := CommandLine().Parse(os.Args[1:]); err != nil { + t.Fatal(err) + } + cmd := Arg(0) + os.Args = Args() + after := Bool("after", false, "") + Parse() + args := Args() + + if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { + t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) + } +} + +// Test that -help invokes the usage message and returns ErrHelp. +func TestHelp(t *testing.T) { + var helpCalled = false + fs := NewFlagSet("help test", ContinueOnError) + fs.Usage = func() { helpCalled = true } + var flag bool + fs.BoolVar(&flag, "flag", false, "regular flag") + // Regular flag invocation should work + err := fs.Parse([]string{"-flag=true"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + if !flag { + t.Error("flag was not set by -flag") + } + if helpCalled { + t.Error("help called for regular flag") + helpCalled = false // reset for next test + } + // Help flag should work as expected. + err = fs.Parse([]string{"-help"}) + if err == nil { + t.Fatal("error expected") + } + if err != ErrHelp { + t.Fatal("expected ErrHelp; got ", err) + } + if !helpCalled { + t.Fatal("help was not called") + } + // If we define a help flag, that should override. + var help bool + fs.BoolVar(&help, "help", false, "help flag") + helpCalled = false + err = fs.Parse([]string{"-help"}) + if err != nil { + t.Fatal("expected no error for defined -help; got ", err) + } + if helpCalled { + t.Fatal("help was called; should not have been for defined help flag") + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/AUTHORS b/Godeps/_workspace/src/github.com/jbenet/commander/AUTHORS new file mode 100644 index 000000000..be7ce33fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/AUTHORS @@ -0,0 +1,11 @@ +# This is the official list of Go-Commander authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. 
+ +Google Inc diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/CONTRIBUTORS b/Godeps/_workspace/src/github.com/jbenet/commander/CONTRIBUTORS new file mode 100644 index 000000000..b38ea4974 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/CONTRIBUTORS @@ -0,0 +1,31 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Go-Commander repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Juan Batiz-Benet +Sebastien Binet +Yves Junqueira diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/LICENSE b/Godeps/_workspace/src/github.com/jbenet/commander/LICENSE new file mode 100644 index 000000000..811abfed5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go-Commander Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
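The gonuts/flag package vendored above is imported elsewhere in this patch through the rewritten Godeps path, so a short, hedged sketch of how a consumer would drive it may help; the flag names, defaults, and argument vector below are invented for illustration and are not part of the patch.

    package main

    import (
        "fmt"
        "time"

        "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag"
    )

    func main() {
        // Define flags on a dedicated FlagSet, as the commander package does
        // with its per-command Flag field.
        fs := flag.NewFlagSet("example", flag.ContinueOnError)
        verbose := fs.Bool("v", false, "verbose output")
        timeout := fs.Duration("timeout", 30*time.Second, "request timeout")

        // Parsing stops at the first non-flag argument, which then shows up
        // in fs.Args(); boolean flags may omit their value.
        if err := fs.Parse([]string{"-v", "-timeout=1m", "somefile"}); err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Println(*verbose, *timeout, fs.Args()) // true 1m0s [somefile]
    }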
diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/README.md b/Godeps/_workspace/src/github.com/jbenet/commander/README.md new file mode 100644 index 000000000..5a773a843 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/README.md @@ -0,0 +1,107 @@ +commander +============ + +[![Build Status](https://drone.io/github.com/gonuts/commander/status.png)](https://drone.io/github.com/gonuts/commander/latest) + +``commander`` is a spin off of [golang](http://golang.org) ``go tool`` infrastructure to provide commands and sub-commands. + +A ``commander.Command`` has a ``Subcommands`` field holding ``[]*commander.Command`` subcommands, referenced by name from the command line. + +So a ``Command`` can have sub commands. + +So you can have, _e.g._: +```sh +$ mycmd action1 [options...] +$ mycmd subcmd1 action1 [options...] +``` + +Example provided by: +- [hwaf](https://github.com/hwaf/hwaf) +- [examples/my-cmd](examples/my-cmd) + +## Documentation +Is available on [godoc](http://godoc.org/github.com/gonuts/commander) + +## Installation +Is performed with the usual: +```sh +$ go get github.com/gonuts/commander +``` + +## Example + +See the simple ``my-cmd`` example command for how this all hangs +together [there](http://github.com/gonuts/commander/blob/master/examples/my-cmd/main.go): + +```sh +$ my-cmd cmd1 +my-cmd-cmd1: hello from cmd1 (quiet=true) + +$ my-cmd cmd1 -q +my-cmd-cmd1: hello from cmd1 (quiet=true) + +$ my-cmd cmd1 -q=0 +my-cmd-cmd1: hello from cmd1 (quiet=false) + +$ my-cmd cmd2 +my-cmd-cmd2: hello from cmd2 (quiet=true) + +$ my-cmd subcmd1 cmd1 +my-cmd-subcmd1-cmd1: hello from subcmd1-cmd1 (quiet=true) + +$ my-cmd subcmd1 cmd2 +my-cmd-subcmd1-cmd2: hello from subcmd1-cmd2 (quiet=true) + +$ my-cmd subcmd2 cmd1 +my-cmd-subcmd2-cmd1: hello from subcmd2-cmd1 (quiet=true) + +$ my-cmd subcmd2 cmd2 +my-cmd-subcmd2-cmd2: hello from subcmd2-cmd2 (quiet=true) + +$ my-cmd help +Usage: + + my-cmd command [arguments] + +The commands are: + + cmd1 runs cmd1 and exits + cmd2 runs cmd2 and exits + subcmd1 subcmd1 subcommand. does subcmd1 thingies + subcmd2 subcmd2 subcommand. does subcmd2 thingies + +Use "my-cmd help [command]" for more information about a command. + +Additional help topics: + + +Use "my-cmd help [topic]" for more information about that topic. + + +$ my-cmd help subcmd1 +Usage: + + subcmd1 command [arguments] + +The commands are: + + cmd1 runs cmd1 and exits + cmd2 runs cmd2 and exits + + +Use "subcmd1 help [command]" for more information about a command. + +Additional help topics: + + +Use "subcmd1 help [topic]" for more information about that topic. + +``` + + +## TODO + +- automatically generate the bash/zsh/csh autocompletion lists +- automatically generate Readme examples text +- test cases + diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/commands.go b/Godeps/_workspace/src/github.com/jbenet/commander/commands.go new file mode 100644 index 000000000..135db22f7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/commands.go @@ -0,0 +1,358 @@ +// Copyright 2012 The Go-Commander Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Based on the original work by The Go Authors: +// Copyright 2011 The Go Authors. All rights reserved. + +// commander helps creating command line programs whose arguments are flags, +// commands and subcommands. 
+package commander + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "sort" + "strings" + "text/template" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" +) + +// UsageSection differentiates between sections in the usage text. +type Listing int + +const ( + CommandsList = iota + HelpTopicsList + Unlisted +) + +// A Command is an implementation of a subcommand. +type Command struct { + + // UsageLine is the short usage message. + // The first word in the line is taken to be the command name. + UsageLine string + + // Short is the short description line shown in command lists. + Short string + + // Long is the long description shown in the 'help ' output. + Long string + + // List reports which list to show this command in Usage and Help. + // Choose between {CommandsList (default), HelpTopicsList, Unlisted} + List Listing + + // Run runs the command. + // The args are the arguments after the command name. + Run func(cmd *Command, args []string) error + + // Flag is a set of flags specific to this command. + Flag flag.FlagSet + + // CustomFlags indicates that the command will do its own + // flag parsing. + CustomFlags bool + + // Subcommands are dispatched from this command + Subcommands []*Command + + // Parent command, nil for root. + Parent *Command + + // UsageTemplate formats the usage (short) information displayed to the user + // (leave empty for default) + UsageTemplate string + + // HelpTemplate formats the help (long) information displayed to the user + // (leave empty for default) + HelpTemplate string + + // Stdout and Stderr by default are os.Stdout and os.Stderr, but you can + // point them at any io.Writer + Stdout io.Writer + Stderr io.Writer +} + +// Name returns the command's name: the first word in the usage line. +func (c *Command) Name() string { + name := c.UsageLine + i := strings.Index(name, " ") + if i >= 0 { + name = name[:i] + } + return name +} + +// Usage prints the usage details to the standard error output. +func (c *Command) Usage() { + c.usage() +} + +// FlagOptions returns the flag's options as a string +func (c *Command) FlagOptions() string { + var buf bytes.Buffer + c.Flag.SetOutput(&buf) + c.Flag.PrintDefaults() + + str := string(buf.Bytes()) + if len(str) > 0 { + return fmt.Sprintf("\nOptions:\n%s", str) + } + return "" +} + +// Runnable reports whether the command can be run; otherwise +// it is a documentation pseudo-command such as importpath. +func (c *Command) Runnable() bool { + return c.Run != nil +} + +// Type to allow us to use sort.Sort on a slice of Commands +type CommandSlice []*Command + +func (c CommandSlice) Len() int { + return len(c) +} + +func (c CommandSlice) Less(i, j int) bool { + return c[i].Name() < c[j].Name() +} + +func (c CommandSlice) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// Sort the commands +func (c *Command) SortCommands() { + sort.Sort(CommandSlice(c.Subcommands)) +} + +// Init the command +func (c *Command) init() { + if c.Parent != nil { + return // already initialized. + } + + // setup strings + if len(c.UsageLine) < 1 { + c.UsageLine = Defaults.UsageLine + } + if len(c.UsageTemplate) < 1 { + c.UsageTemplate = Defaults.UsageTemplate + } + if len(c.HelpTemplate) < 1 { + c.HelpTemplate = Defaults.HelpTemplate + } + + if c.Stderr == nil { + c.Stderr = os.Stderr + } + if c.Stdout == nil { + c.Stdout = os.Stdout + } + + // init subcommands + for _, cmd := range c.Subcommands { + cmd.init() + } + + // init hierarchy... 
+ for _, cmd := range c.Subcommands { + cmd.Parent = c + } +} + +// Dispatch executes the command using the provided arguments. +// If a subcommand exists matching the first argument, it is dispatched. +// Otherwise, the command's Run function is called. +func (c *Command) Dispatch(args []string) error { + if c == nil { + return fmt.Errorf("Called Run() on a nil Command") + } + + // Ensure command is initialized. + c.init() + + // First, try a sub-command + if len(args) > 0 { + for _, cmd := range c.Subcommands { + n := cmd.Name() + if n == args[0] { + return cmd.Dispatch(args[1:]) + } + } + + // help is builtin (but after, to allow overriding) + if args[0] == "help" { + return c.help(args[1:]) + } + + // then, try out an external binary (git-style) + bin, err := exec.LookPath(c.FullName() + "-" + args[0]) + if err == nil { + cmd := exec.Command(bin, args[1:]...) + cmd.Stdin = os.Stdin + cmd.Stdout = c.Stdout + cmd.Stderr = c.Stderr + return cmd.Run() + } + } + + // then, try running this command + if c.Runnable() { + if !c.CustomFlags { + var err = error(nil) + c.Flag.Usage = func() { + c.Usage() + err = fmt.Errorf("Failed to parse flags.") + } + c.Flag.Parse(args) + if err != nil { + return err + } + args = c.Flag.Args() + } + return c.Run(c, args) + } + + // TODO: try an alias + //... + + // Last, print usage + if err := c.usage(); err != nil { + return err + } + return nil +} + +func (c *Command) usage() error { + // c.SortCommands() + err := tmpl(c.Stderr, c.UsageTemplate, c) + if err != nil { + fmt.Println(err) + } + return err +} + +// help implements the 'help' command. +func (c *Command) help(args []string) error { + + // help exactly for this command? + if len(args) == 0 { + if len(c.Long) > 0 { + return tmpl(c.Stdout, c.HelpTemplate, c) + } else { + return c.usage() + } + } + + arg := args[0] + + // is this help for a subcommand? + for _, cmd := range c.Subcommands { + n := cmd.Name() + // strip out "-"" name + if strings.HasPrefix(n, c.Name()+"-") { + n = n[len(c.Name()+"-"):] + } + if n == arg { + return cmd.help(args[1:]) + } + } + + return fmt.Errorf("Unknown help topic %#q. Run '%v help'.\n", arg, c.Name()) +} + +func (c *Command) MaxLen() (res int) { + res = 0 + for _, cmd := range c.Subcommands { + i := len(cmd.Name()) + if i > res { + res = i + } + } + return +} + +// ColFormat returns the column header size format for printing in the template +func (c *Command) ColFormat() string { + sz := c.MaxLen() + if sz < 11 { + sz = 11 + } + return fmt.Sprintf("%%-%ds", sz) +} + +// FullName returns the full name of the command, prefixed with parent commands +func (c *Command) FullName() string { + n := c.Name() + if c.Parent != nil { + n = c.Parent.FullName() + "-" + n + } + return n +} + +// FullSpacedName returns the full name of the command, with ' ' instead of '-' +func (c *Command) FullSpacedName() string { + n := c.Name() + if c.Parent != nil { + n = c.Parent.FullSpacedName() + " " + n + } + return n +} + +func (c *Command) SubcommandList(list Listing) []*Command { + var cmds []*Command + for _, cmd := range c.Subcommands { + if cmd.List == list { + cmds = append(cmds, cmd) + } + } + return cmds +} + +var Defaults = Command{ + UsageTemplate: `{{if .Runnable}}Usage: {{if .Parent}}{{.Parent.FullSpacedName}}{{end}} {{.UsageLine}} + +{{else}}{{.FullSpacedName}} - {{end}}{{.Short}} + +{{if commandList}}Commands: +{{range commandList}} + {{.Name | printf (colfmt)}} {{.Short}}{{end}} + +Use "{{.Name}} help " for more information about a command. 
+ +{{end}}{{.FlagOptions}}{{if helpList}} +Additional help topics: +{{range helpList}} + {{.Name | printf (colfmt)}} {{.Short}}{{end}} + +Use "{{.Name}} help " for more information about that topic. + +{{end}}`, + + HelpTemplate: `{{if .Runnable}}Usage: {{if .Parent}}{{.Parent.FullSpacedName}}{{end}} {{.UsageLine}} + +{{end}}{{.Long | trim}} +{{.FlagOptions}} +`, +} + +// tmpl executes the given template text on data, writing the result to w. +func tmpl(w io.Writer, text string, data interface{}) error { + t := template.New("top") + t.Funcs(template.FuncMap{ + "trim": strings.TrimSpace, + "colfmt": func() string { return data.(*Command).ColFormat() }, + "commandList": func() []*Command { return data.(*Command).SubcommandList(CommandsList) }, + "helpList": func() []*Command { return data.(*Command).SubcommandList(HelpTopicsList) }, + }) + template.Must(t.Parse(text)) + return t.Execute(w, data) +} diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_cmd1.go b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_cmd1.go new file mode 100644 index 000000000..f89a1c26e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_cmd1.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" +) + +var cmd_cmd1 = &commander.Command{ + Run: ex_run_cmd_cmd1, + UsageLine: "cmd1 [options]", + Short: "runs cmd1 and exits", + Long: ` +runs cmd1 and exits. + +ex: +$ my-cmd cmd1 +`, + Flag: *flag.NewFlagSet("my-cmd-cmd1", flag.ExitOnError), +} + +func init() { + cmd_cmd1.Flag.Bool("q", true, "only print error and warning messages, all other output will be suppressed") +} + +func ex_run_cmd_cmd1(cmd *commander.Command, args []string) error { + name := "my-cmd-" + cmd.Name() + quiet := cmd.Flag.Lookup("q").Value.Get().(bool) + fmt.Printf("%s: hello from cmd1 (quiet=%v)\n", name, quiet) + return nil +} + +// EOF diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_cmd2.go b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_cmd2.go new file mode 100644 index 000000000..e6a0b54e6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_cmd2.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" +) + +func ex_make_cmd_cmd2() *commander.Command { + cmd := &commander.Command{ + Run: ex_run_cmd_cmd2, + UsageLine: "cmd2 [options]", + Short: "runs cmd2 and exits", + Long: ` +runs cmd2 and exits. 
+ +ex: + $ my-cmd cmd2 +`, + Flag: *flag.NewFlagSet("my-cmd-cmd2", flag.ExitOnError), + } + cmd.Flag.Bool("q", true, "only print error and warning messages, all other output will be suppressed") + return cmd +} + +func ex_run_cmd_cmd2(cmd *commander.Command, args []string) error { + name := "my-cmd-" + cmd.Name() + quiet := cmd.Flag.Lookup("q").Value.Get().(bool) + fmt.Printf("%s: hello from cmd2 (quiet=%v)\n", name, quiet) + return nil +} + +// EOF diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd1.go b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd1.go new file mode 100644 index 000000000..375a0c5e0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd1.go @@ -0,0 +1,18 @@ +package main + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" +) + +var cmd_subcmd1 = &commander.Command{ + UsageLine: "subcmd1 ", + Short: "subcmd1 subcommand. does subcmd1 thingies", + Subcommands: []*commander.Command{ + cmd_subcmd1_cmd1, + cmd_subcmd1_cmd2, + }, + Flag: *flag.NewFlagSet("my-cmd-subcmd1", flag.ExitOnError), +} + +// EOF diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd1_cmd1.go b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd1_cmd1.go new file mode 100644 index 000000000..82433a63e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd1_cmd1.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" +) + +var cmd_subcmd1_cmd1 = &commander.Command{ + Run: ex_run_cmd_subcmd1_cmd1, + UsageLine: "cmd1 [options]", + Short: "runs cmd1 and exits", + Long: ` +runs cmd1 and exits. + +ex: +$ my-cmd subcmd1 cmd1 +`, + Flag: *flag.NewFlagSet("my-cmd-subcmd1-cmd1", flag.ExitOnError), +} + +func init() { + cmd_subcmd1_cmd1.Flag.Bool("q", true, "only print error and warning messages, all other output will be suppressed") +} + +func ex_run_cmd_subcmd1_cmd1(cmd *commander.Command, args []string) error { + name := "my-cmd-subcmd1-" + cmd.Name() + quiet := cmd.Flag.Lookup("q").Value.Get().(bool) + fmt.Printf("%s: hello from subcmd1-cmd1 (quiet=%v)\n", name, quiet) + return nil +} + +// EOF diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd1_cmd2.go b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd1_cmd2.go new file mode 100644 index 000000000..1cc194f87 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd1_cmd2.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" +) + +var cmd_subcmd1_cmd2 = &commander.Command{ + Run: ex_run_cmd_subcmd1_cmd2, + UsageLine: "cmd2 [options]", + Short: "runs cmd2 and exits", + Long: ` +runs cmd2 and exits. 
+ +ex: +$ my-cmd subcmd1 cmd2 +`, + Flag: *flag.NewFlagSet("my-cmd-subcmd1-cmd2", flag.ExitOnError), +} + +func init() { + cmd_subcmd1_cmd2.Flag.Bool("q", true, "only print error and warning messages, all other output will be suppressed") +} + +func ex_run_cmd_subcmd1_cmd2(cmd *commander.Command, args []string) error { + name := "my-cmd-subcmd1-" + cmd.Name() + quiet := cmd.Flag.Lookup("q").Value.Get().(bool) + fmt.Printf("%s: hello from subcmd1-cmd2 (quiet=%v)\n", name, quiet) + return nil +} + +// EOF diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd2.go b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd2.go new file mode 100644 index 000000000..83c0160c2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd2.go @@ -0,0 +1,22 @@ +package main + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" +) + +func ex_make_cmd_subcmd2() *commander.Command { + cmd := &commander.Command{ + UsageLine: "subcmd2", + Short: "subcmd2 subcommand. does subcmd2 thingies (help list)", + List: commander.HelpTopicsList, + Subcommands: []*commander.Command{ + ex_make_cmd_subcmd2_cmd1(), + ex_make_cmd_subcmd2_cmd2(), + }, + Flag: *flag.NewFlagSet("my-cmd-subcmd2", flag.ExitOnError), + } + return cmd +} + +// EOF diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd2_cmd1.go b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd2_cmd1.go new file mode 100644 index 000000000..11d659b08 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd2_cmd1.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" +) + +func ex_make_cmd_subcmd2_cmd1() *commander.Command { + cmd := &commander.Command{ + Run: ex_run_cmd_subcmd2_cmd1, + UsageLine: "cmd1 [options]", + Short: "runs cmd1 and exits", + Long: ` +runs cmd1 and exits. + +ex: + $ my-cmd subcmd2 cmd1 +`, + Flag: *flag.NewFlagSet("my-cmd-subcmd2-cmd1", flag.ExitOnError), + } + cmd.Flag.Bool("q", true, "only print error and warning messages, all other output will be suppressed") + return cmd +} + +func ex_run_cmd_subcmd2_cmd1(cmd *commander.Command, args []string) error { + name := "my-cmd-subcmd2-" + cmd.Name() + quiet := cmd.Flag.Lookup("q").Value.Get().(bool) + fmt.Printf("%s: hello from subcmd2-cmd1 (quiet=%v)\n", name, quiet) + return nil +} + +// EOF diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd2_cmd2.go b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd2_cmd2.go new file mode 100644 index 000000000..dbdb5bbe6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/cmd_subcmd2_cmd2.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" +) + +func ex_make_cmd_subcmd2_cmd2() *commander.Command { + cmd := &commander.Command{ + Run: ex_run_cmd_subcmd2_cmd2, + UsageLine: "cmd2 [options]", + Short: "runs cmd2 and exits", + Long: ` +runs cmd2 and exits. 
+ +ex: + $ my-cmd subcmd2 cmd2 +`, + Flag: *flag.NewFlagSet("my-cmd-subcmd2-cmd2", flag.ExitOnError), + } + cmd.Flag.Bool("q", true, "only print error and warning messages, all other output will be suppressed") + return cmd +} + +func ex_run_cmd_subcmd2_cmd2(cmd *commander.Command, args []string) error { + name := "my-cmd-subcmd2-" + cmd.Name() + quiet := cmd.Flag.Lookup("q").Value.Get().(bool) + fmt.Printf("%s: hello from subcmd2-cmd2 (quiet=%v)\n", name, quiet) + return nil +} + +// EOF diff --git a/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/main.go b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/main.go new file mode 100644 index 000000000..6fa34aa7c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/commander/examples/my-cmd/main.go @@ -0,0 +1,33 @@ +package main + +import ( + "fmt" + "os" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" +) + +var g_cmd = &commander.Command{ + UsageLine: os.Args[0] + " does cool things", +} + +func init() { + g_cmd.Subcommands = []*commander.Command{ + cmd_cmd1, + ex_make_cmd_cmd2(), + cmd_subcmd1, + ex_make_cmd_subcmd2(), + } +} + +func main() { + err := g_cmd.Dispatch(os.Args[1:]) + if err != nil { + fmt.Printf("%v\n", err) + os.Exit(1) + } + + return +} + +// EOF diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/LICENSE b/Godeps/_workspace/src/github.com/jbenet/datastore.go/LICENSE new file mode 100644 index 000000000..96bcd5df0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/Makefile b/Godeps/_workspace/src/github.com/jbenet/datastore.go/Makefile new file mode 100644 index 000000000..cfb2f6e63 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/Makefile @@ -0,0 +1,12 @@ +build: + go build + +deps: + go get ./... + +watch: + -make + @echo "[watching *.go; for recompilation]" + # for portability, use watchmedo -- pip install watchmedo + @watchmedo shell-command --patterns="*.go;" --recursive \ + --command='make' . 
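The examples/my-cmd files above show the full, file-per-command layout; condensed into a single hypothetical program (the command name, flag name, and greeting are invented), the same wiring of a root commander.Command, one runnable subcommand, and its FlagSet looks roughly like this:

    package main

    import (
        "fmt"
        "os"

        "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag"
        "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander"
    )

    var cmdHello = &commander.Command{
        UsageLine: "hello [options]",
        Short:     "prints a greeting",
        Flag:      *flag.NewFlagSet("tool-hello", flag.ExitOnError),
        Run: func(cmd *commander.Command, args []string) error {
            // Flags are parsed by Dispatch before Run is called.
            name := cmd.Flag.Lookup("name").Value.Get().(string)
            fmt.Println("hello", name)
            return nil
        },
    }

    var root = &commander.Command{
        UsageLine:   os.Args[0] + " greets people",
        Subcommands: []*commander.Command{cmdHello},
    }

    func main() {
        cmdHello.Flag.String("name", "world", "who to greet")
        // Dispatch matches the first argument against subcommand names,
        // parses the subcommand's flags, and runs it.
        if err := root.Dispatch(os.Args[1:]); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }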
diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/README.md b/Godeps/_workspace/src/github.com/jbenet/datastore.go/README.md new file mode 100644 index 000000000..6c0a3124a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/README.md @@ -0,0 +1,15 @@ +# datastore interface + +datastore is a generic layer of abstraction for data store and database access. It is a simple API with the aim to enable application development in a datastore-agnostic way, allowing datastores to be swapped seamlessly without changing application code. Thus, one can leverage different datastores with different strengths without committing the application to one datastore throughout its lifetime. + +In addition, grouped datastores significantly simplify interesting data access patterns (such as caching and sharding). + +Based on [datastore.py](https://github.com/datastore/datastore). + +### Documentation + +https://godoc.org/github.com/datastore/datastore.go + +### License + +MIT diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds.go new file mode 100644 index 000000000..33ea5f3a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds.go @@ -0,0 +1,118 @@ +package datastore + +import ( + "log" +) + +// Here are some basic datastore implementations. + +// MapDatastore uses a standard Go map for internal storage. +type keyMap map[Key]interface{} +type MapDatastore struct { + values keyMap +} + +func NewMapDatastore() (d *MapDatastore) { + return &MapDatastore{ + values: keyMap{}, + } +} + +func (d *MapDatastore) Put(key Key, value interface{}) (err error) { + d.values[key] = value + return nil +} + +func (d *MapDatastore) Get(key Key) (value interface{}, err error) { + val, found := d.values[key] + if !found { + return nil, ErrNotFound + } + return val, nil +} + +func (d *MapDatastore) Has(key Key) (exists bool, err error) { + _, found := d.values[key] + return found, nil +} + +func (d *MapDatastore) Delete(key Key) (err error) { + delete(d.values, key) + return nil +} + +func (d *MapDatastore) KeyList() ([]Key, error) { + var keys []Key + for k, _ := range d.values { + keys = append(keys, k) + } + return keys, nil +} + +// NullDatastore stores nothing, but conforms to the API. +// Useful to test with. +type NullDatastore struct { +} + +func NewNullDatastore() *NullDatastore { + return &NullDatastore{} +} + +func (d *NullDatastore) Put(key Key, value interface{}) (err error) { + return nil +} + +func (d *NullDatastore) Get(key Key) (value interface{}, err error) { + return nil, nil +} + +func (d *NullDatastore) Has(key Key) (exists bool, err error) { + return false, nil +} + +func (d *NullDatastore) Delete(key Key) (err error) { + return nil +} + +func (d *NullDatastore) KeyList() ([]Key, error) { + return nil, nil +} + +// LogDatastore logs all accesses through the datastore. 
+type LogDatastore struct { + Name string + Child Datastore +} + +func NewLogDatastore(ds Datastore, name string) *LogDatastore { + if len(name) < 1 { + name = "LogDatastore" + } + return &LogDatastore{Name: name, Child: ds} +} + +func (d *LogDatastore) Put(key Key, value interface{}) (err error) { + log.Printf("%s: Put %s", d.Name, key) + // log.Printf("%s: Put %s ```%s```", d.Name, key, value) + return d.Child.Put(key, value) +} + +func (d *LogDatastore) Get(key Key) (value interface{}, err error) { + log.Printf("%s: Get %s", d.Name, key) + return d.Child.Get(key) +} + +func (d *LogDatastore) Has(key Key) (exists bool, err error) { + log.Printf("%s: Has %s", d.Name, key) + return d.Child.Has(key) +} + +func (d *LogDatastore) Delete(key Key) (err error) { + log.Printf("%s: Delete %s", d.Name, key) + return d.Child.Delete(key) +} + +func (d *LogDatastore) KeyList() ([]Key, error) { + log.Printf("%s: Get KeyList.", d.Name) + return d.Child.KeyList() +} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/datastore.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/datastore.go new file mode 100644 index 000000000..9ff21a6a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/datastore.go @@ -0,0 +1,87 @@ +package datastore + +import ( + "errors" +) + +/* +A Datastore represents storage for any key-value pair. + +Datastores are general enough to be backed by all kinds of different storage: +in-memory caches, databases, a remote datastore, flat files on disk, etc. + +The general idea is to wrap a more complicated storage facility in a simple, +uniform interface, keeping the freedom of using the right tools for the job. +In particular, a Datastore can aggregate other datastores in interesting ways, +like sharded (to distribute load) or tiered access (caches before databases). + +While Datastores should be written general enough to accept all sorts of +values, some implementations will undoubtedly have to be specific (e.g. SQL +databases where fields should be decomposed into columns), particularly to +support queries efficiently. Moreover, certain datastores may enforce certain +types of values (e.g. requiring an io.Reader, a specific struct, etc) or +serialization formats (JSON, Protobufs, etc). + +IMPORTANT: No Datastore should ever Panic! This is a cross-module interface, +and thus it should behave predictably and handle exceptional conditions with +proper error reporting. Thus, all Datastore calls may return errors, which +should be checked by callers. +*/ + +type Datastore interface { + // Put stores the object `value` named by `key`. + // + // The generalized Datastore interface does not impose a value type, + // allowing various datastore middleware implementations (which do not + // handle the values directly) to be composed together. + // + // Ultimately, the lowest-level datastore will need to do some value checking + // or risk getting incorrect values. It may also be useful to expose a more + // type-safe interface to your application, and do the checking up-front. + Put(key Key, value interface{}) (err error) + + // Get retrieves the object `value` named by `key`. + // Get will return ErrNotFound if the key is not mapped to a value. + Get(key Key) (value interface{}, err error) + + // Has returns whether the `key` is mapped to a `value`. + // In some contexts, it may be much cheaper only to check for existence of + // a value, rather than retrieving the value itself. (e.g. HTTP HEAD). 
+ // The default implementation is found in `GetBackedHas`. + Has(key Key) (exists bool, err error) + + // Delete removes the value for given `key`. + Delete(key Key) (err error) + + // Returns a list of keys in the datastore + KeyList() ([]Key, error) +} + +// Errors + +// ErrNotFound is returned by Get, Has, and Delete when a datastore does not +// map the given key to a value. +var ErrNotFound = errors.New("datastore: key not found.") + +// ErrInvalidType is returned by Put when a given value is incopatible with +// the type the datastore supports. This means a conversion (or serialization) +// is needed beforehand. +var ErrInvalidType = errors.New("datastore: invalid type error.") + +// GetBackedHas provides a default Datastore.Has implementation. +// It exists so Datastore.Has implementations can use it, like so: +// +// func (*d SomeDatastore) Has(key Key) (exists bool, err error) { +// return GetBackedHas(d, key) +// } +func GetBackedHas(ds Datastore, key Key) (bool, error) { + _, err := ds.Get(key) + switch err { + case nil: + return true, nil + case ErrNotFound: + return false, nil + default: + return false, err + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/elastigo/datastore.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/elastigo/datastore.go new file mode 100644 index 000000000..d0f707bdc --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/elastigo/datastore.go @@ -0,0 +1,126 @@ +package elastigo + +import ( + "errors" + "fmt" + "net/url" + "strings" + + "github.com/codahale/blake2" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + "github.com/mattbaird/elastigo/api" + "github.com/mattbaird/elastigo/core" +) + +// Currently, elastigo does not allow connecting to multiple elasticsearch +// instances. The elastigo API uses global static variables (ugh). +// See https://github.com/mattbaird/elastigo/issues/22 +// +// Thus, we use a global static variable (GlobalInstance), and return an +// error if NewDatastore is called twice with different addresses. +var GlobalInstance string + +// Datastore uses a standard Go map for internal storage. +type Datastore struct { + url string + index string + + // Elastic search does not allow slashes in their object ids, + // so we hash the key. By default, we use the provided BlakeKeyHash + KeyHash func(ds.Key) string +} + +func NewDatastore(urlstr string) (*Datastore, error) { + if GlobalInstance != "" && GlobalInstance != urlstr { + return nil, fmt.Errorf("elastigo only allows one client. See godoc.") + } + + uf := "http://:/" + u, err := url.Parse(urlstr) + if err != nil { + return nil, fmt.Errorf("error parsing url: %s (%s)", urlstr, uf) + } + + host := strings.Split(u.Host, ":") + api.Domain = host[0] + if len(host) > 1 { + api.Port = host[1] + } + + index := strings.Trim(u.Path, "/") + if strings.Contains(index, "/") { + e := "elastigo index cannot have slashes: %s (%s -> %s)" + return nil, fmt.Errorf(e, index, urlstr, uf) + } + + GlobalInstance = urlstr + return &Datastore{ + url: urlstr, + index: index, + KeyHash: BlakeKeyHash, + }, nil +} + +// Returns the ElasticSearch index for given key. If the datastore specifies +// an index, use that. Else, key.Parent +func (d *Datastore) Index(key ds.Key) string { + if len(d.index) > 0 { + return d.index + } + return key.Parent().BaseNamespace() +} + +// value should be JSON serializable. 
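The `GetBackedHas` helper above is the intended way for implementations whose cheapest primitive is `Get` to satisfy `Has` (note the receiver in its doc comment is written `(*d SomeDatastore)`; the conventional Go spelling is `(d *SomeDatastore)`). A minimal sketch of a custom, mutex-guarded implementation wired up that way — the package and type names here are hypothetical:

```go
package mydatastore

import (
	"sync"

	ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
)

// SyncMapDatastore is a hypothetical mutex-guarded map datastore that
// delegates Has to GetBackedHas, as the interface documentation suggests.
type SyncMapDatastore struct {
	mu     sync.Mutex
	values map[ds.Key]interface{}
}

func NewSyncMapDatastore() *SyncMapDatastore {
	return &SyncMapDatastore{values: map[ds.Key]interface{}{}}
}

func (d *SyncMapDatastore) Put(key ds.Key, value interface{}) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.values[key] = value
	return nil
}

func (d *SyncMapDatastore) Get(key ds.Key) (interface{}, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	v, ok := d.values[key]
	if !ok {
		return nil, ds.ErrNotFound
	}
	return v, nil
}

// Has is backed by Get, via the helper provided in datastore.go.
func (d *SyncMapDatastore) Has(key ds.Key) (bool, error) {
	return ds.GetBackedHas(d, key)
}

func (d *SyncMapDatastore) Delete(key ds.Key) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	delete(d.values, key)
	return nil
}

func (d *SyncMapDatastore) KeyList() ([]ds.Key, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	keys := make([]ds.Key, 0, len(d.values))
	for k := range d.values {
		keys = append(keys, k)
	}
	return keys, nil
}
```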
+func (d *Datastore) Put(key ds.Key, value interface{}) (err error) { + id := d.KeyHash(key) + res, err := core.Index(false, d.Index(key), key.Type(), id, value) + if err != nil { + return err + } + if !res.Ok { + return fmt.Errorf("Elasticsearch response: NOT OK. %v", res) + } + return nil +} + +func (d *Datastore) Get(key ds.Key) (value interface{}, err error) { + id := d.KeyHash(key) + res, err := core.Get(false, d.Index(key), key.Type(), id) + if err != nil { + return nil, err + } + if !res.Ok { + return nil, fmt.Errorf("Elasticsearch response: NOT OK. %v", res) + } + return res.Source, nil +} + +func (d *Datastore) Has(key ds.Key) (exists bool, err error) { + id := d.KeyHash(key) + return core.Exists(false, d.Index(key), key.Type(), id) +} + +func (d *Datastore) Delete(key ds.Key) (err error) { + id := d.KeyHash(key) + res, err := core.Delete(false, d.Index(key), key.Type(), id, 0, "") + if err != nil { + return err + } + if !res.Ok { + return fmt.Errorf("Elasticsearch response: NOT OK. %v", res) + } + return nil +} + +func (d *Datastore) KeyList() ([]ds.Key, error) { + return nil, errors.New("Not yet implemented!") +} + +// Hash a key and return the first 16 hex chars of its blake2b hash. +// basically: Blake2b(key).HexString[:16] +func BlakeKeyHash(key ds.Key) string { + h := blake2.NewBlake2B() + h.Write(key.Bytes()) + d := h.Sum(nil) + return fmt.Sprintf("%x", d)[:16] +} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/key.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/key.go new file mode 100644 index 000000000..23c2d2db9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/key.go @@ -0,0 +1,189 @@ +package datastore + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid" + "path" + "strings" +) + +/* +A Key represents the unique identifier of an object. +Our Key scheme is inspired by file systems and Google App Engine key model. + +Keys are meant to be unique across a system. Keys are hierarchical, +incorporating more and more specific namespaces. Thus keys can be deemed +'children' or 'ancestors' of other keys:: + + Key("/Comedy") + Key("/Comedy/MontyPython") + +Also, every namespace can be parametrized to embed relevant object +information. For example, the Key `name` (most specific namespace) could +include the object type:: + + Key("/Comedy/MontyPython/Actor:JohnCleese") + Key("/Comedy/MontyPython/Sketch:CheeseShop") + Key("/Comedy/MontyPython/Sketch:CheeseShop/Character:Mousebender") + +*/ +type Key struct { + string +} + +func NewKey(s string) Key { + k := Key{s} + k.Clean() + return k +} + +// Cleans up a Key, using path.Clean. +func (k *Key) Clean() { + k.string = path.Clean("/" + k.string) +} + +// Returns the string value of Key +func (k Key) String() string { + return k.string +} + +// Returns the bytes value of Key +func (k Key) Bytes() []byte { + return []byte(k.string) +} + +// Returns the `list` representation of this Key. +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List() +// ["Comedy", "MontyPythong", "Actor:JohnCleese"] +func (k Key) List() []string { + return strings.Split(k.string, "/")[1:] +} + +// Returns the reverse of this Key. +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Reverse() +// NewKey("/Actor:JohnCleese/MontyPython/Comedy") +func (k Key) Reverse() Key { + l := k.List() + r := make([]string, len(l), len(l)) + for i, e := range l { + r[len(l)-i-1] = e + } + return NewKey(strings.Join(r, "/")) +} + +// Returns the `namespaces` making up this Key. 
+// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List() +// ["Comedy", "MontyPythong", "Actor:JohnCleese"] +func (k Key) Namespaces() []string { + return k.List() +} + +// Returns the "base" namespace of this key (like path.Base(filename)) +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").BaseNamespace() +// "Actor:JohnCleese" +func (k Key) BaseNamespace() string { + n := k.Namespaces() + return n[len(n)-1] +} + +// Returns the "type" of this key (value of last namespace). +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List() +// "Actor" +func (k Key) Type() string { + return NamespaceType(k.BaseNamespace()) +} + +// Returns the "name" of this key (field of last namespace). +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List() +// "Actor" +func (k Key) Name() string { + return NamespaceValue(k.BaseNamespace()) +} + +// Returns an "instance" of this type key (appends value to namespace). +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List() +// "JohnCleese" +func (k Key) Instance(s string) Key { + return NewKey(k.string + ":" + s) +} + +// Returns the "path" of this key (parent + type). +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Path() +// NewKey("/Comedy/MontyPython/Actor") +func (k Key) Path() Key { + s := k.Parent().string + "/" + NamespaceType(k.BaseNamespace()) + return NewKey(s) +} + +// Returns the `parent` Key of this Key. +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Parent() +// NewKey("/Comedy/MontyPython") +func (k Key) Parent() Key { + n := k.List() + if len(n) == 1 { + return NewKey("/") + } + return NewKey(strings.Join(n[:len(n)-1], "/")) +} + +// Returns the `child` Key of this Key. +// NewKey("/Comedy/MontyPython").Child("Actor:JohnCleese") +// NewKey("/Comedy/MontyPython/Actor:JohnCleese") +func (k Key) Child(s string) Key { + return NewKey(k.string + "/" + s) +} + +// Returns whether this key is an ancestor of `other` +// NewKey("/Comedy").IsAncestorOf("/Comedy/MontyPython") +// true +func (k Key) IsAncestorOf(other Key) bool { + if other.string == k.string { + return false + } + return strings.HasPrefix(other.string, k.string) +} + +// Returns whether this key is a descendent of `other` +// NewKey("/Comedy/MontyPython").IsDescendantOf("/Comedy") +// true +func (k Key) IsDescendantOf(other Key) bool { + if other.string == k.string { + return false + } + return strings.HasPrefix(k.string, other.string) +} + +func (k Key) IsTopLevel() bool { + return len(k.List()) == 1 +} + +// Returns a randomly (uuid) generated key. +// RandomKey() +// NewKey("/f98719ea086343f7b71f32ea9d9d521d") +func RandomKey() Key { + return NewKey(strings.Replace(uuid.New(), "-", "", -1)) +} + +/* +A Key Namespace is like a path element. 
+A namespace can optionally include a type (delimited by ':') + + > NamespaceValue("Song:PhilosopherSong") + PhilosopherSong + > NamespaceType("Song:PhilosopherSong") + Song + > NamespaceType("Music:Song:PhilosopherSong") + Music:Song +*/ + +func NamespaceType(namespace string) string { + parts := strings.Split(namespace, ":") + if len(parts) < 2 { + return "" + } + return strings.Join(parts[0:len(parts)-1], ":") +} + +func NamespaceValue(namespace string) string { + parts := strings.Split(namespace, ":") + return parts[len(parts)-1] +} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/key_test.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/key_test.go new file mode 100644 index 000000000..df210523f --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/key_test.go @@ -0,0 +1,127 @@ +package datastore_test + +import ( + "bytes" + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + . "launchpad.net/gocheck" + "math/rand" + "path" + "strings" + "testing" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +func randomString() string { + chars := "abcdefghijklmnopqrstuvwxyz1234567890" + var buf bytes.Buffer + l := rand.Intn(50) + for j := 0; j < l; j++ { + buf.WriteByte(chars[rand.Intn(len(chars))]) + } + return buf.String() +} + +type KeySuite struct{} + +var _ = Suite(&KeySuite{}) + +func (ks *KeySuite) SubtestKey(s string, c *C) { + fixed := path.Clean("/" + s) + namespaces := strings.Split(fixed, "/")[1:] + lastNamespace := namespaces[len(namespaces)-1] + lnparts := strings.Split(lastNamespace, ":") + ktype := "" + if len(lnparts) > 1 { + ktype = strings.Join(lnparts[:len(lnparts)-1], ":") + } + kname := lnparts[len(lnparts)-1] + + kchild := path.Clean(fixed + "/cchildd") + kparent := "/" + strings.Join(append(namespaces[:len(namespaces)-1]), "/") + kpath := path.Clean(kparent + "/" + ktype) + kinstance := fixed + ":" + "inst" + + c.Log("Testing: ", NewKey(s)) + + c.Check(NewKey(s).String(), Equals, fixed) + c.Check(NewKey(s), Equals, NewKey(s)) + c.Check(NewKey(s).String(), Equals, NewKey(s).String()) + c.Check(NewKey(s).Name(), Equals, kname) + c.Check(NewKey(s).Type(), Equals, ktype) + c.Check(NewKey(s).Path().String(), Equals, kpath) + c.Check(NewKey(s).Instance("inst").String(), Equals, kinstance) + + c.Check(NewKey(s).Child("cchildd").String(), Equals, kchild) + c.Check(NewKey(s).Child("cchildd").Parent().String(), Equals, fixed) + c.Check(NewKey(s).Parent().String(), Equals, kparent) + c.Check(len(NewKey(s).List()), Equals, len(namespaces)) + c.Check(len(NewKey(s).Namespaces()), Equals, len(namespaces)) + for i, e := range NewKey(s).List() { + c.Check(namespaces[i], Equals, e) + } +} + +func (ks *KeySuite) TestKeyBasic(c *C) { + ks.SubtestKey("", c) + ks.SubtestKey("abcde", c) + ks.SubtestKey("disahfidsalfhduisaufidsail", c) + ks.SubtestKey("/fdisahfodisa/fdsa/fdsafdsafdsafdsa/fdsafdsa/", c) + ks.SubtestKey("4215432143214321432143214321", c) + ks.SubtestKey("/fdisaha////fdsa////fdsafdsafdsafdsa/fdsafdsa/", c) + ks.SubtestKey("abcde:fdsfd", c) + ks.SubtestKey("disahfidsalfhduisaufidsail:fdsa", c) + ks.SubtestKey("/fdisahfodisa/fdsa/fdsafdsafdsafdsa/fdsafdsa/:", c) + ks.SubtestKey("4215432143214321432143214321:", c) + ks.SubtestKey("fdisaha////fdsa////fdsafdsafdsafdsa/fdsafdsa/f:fdaf", c) +} + +func CheckTrue(c *C, cond bool) { + c.Check(cond, Equals, true) +} + +func (ks *KeySuite) TestKeyAncestry(c *C) { + k1 := NewKey("/A/B/C") + k2 := NewKey("/A/B/C/D") + + 
c.Check(k1.String(), Equals, "/A/B/C") + c.Check(k2.String(), Equals, "/A/B/C/D") + CheckTrue(c, k1.IsAncestorOf(k2)) + CheckTrue(c, k2.IsDescendantOf(k1)) + CheckTrue(c, NewKey("/A").IsAncestorOf(k2)) + CheckTrue(c, NewKey("/A").IsAncestorOf(k1)) + CheckTrue(c, !NewKey("/A").IsDescendantOf(k2)) + CheckTrue(c, !NewKey("/A").IsDescendantOf(k1)) + CheckTrue(c, k2.IsDescendantOf(NewKey("/A"))) + CheckTrue(c, k1.IsDescendantOf(NewKey("/A"))) + CheckTrue(c, !k2.IsAncestorOf(NewKey("/A"))) + CheckTrue(c, !k1.IsAncestorOf(NewKey("/A"))) + CheckTrue(c, !k2.IsAncestorOf(k2)) + CheckTrue(c, !k1.IsAncestorOf(k1)) + c.Check(k1.Child("D").String(), Equals, k2.String()) + c.Check(k1.String(), Equals, k2.Parent().String()) + c.Check(k1.Path().String(), Equals, k2.Parent().Path().String()) +} + +func (ks *KeySuite) TestType(c *C) { + k1 := NewKey("/A/B/C:c") + k2 := NewKey("/A/B/C:c/D:d") + + CheckTrue(c, k1.IsAncestorOf(k2)) + CheckTrue(c, k2.IsDescendantOf(k1)) + c.Check(k1.Type(), Equals, "C") + c.Check(k2.Type(), Equals, "D") + c.Check(k1.Type(), Equals, k2.Parent().Type()) +} + +func (ks *KeySuite) TestRandom(c *C) { + keys := map[Key]bool{} + for i := 0; i < 1000; i++ { + r := RandomKey() + _, found := keys[r] + CheckTrue(c, !found) + keys[r] = true + } + CheckTrue(c, len(keys) == 1000) +} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb/datastore.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb/datastore.go new file mode 100644 index 000000000..1b00d8bfd --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb/datastore.go @@ -0,0 +1,78 @@ +package leveldb + +import ( + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" +) + +// Datastore uses a standard Go map for internal storage. +type Datastore struct { + DB *leveldb.DB +} + +type Options opt.Options + +func NewDatastore(path string, opts *Options) (*Datastore, error) { + var nopts opt.Options + if opts != nil { + nopts = opt.Options(*opts) + } + db, err := leveldb.OpenFile(path, &nopts) + if err != nil { + return nil, err + } + + return &Datastore{ + DB: db, + }, nil +} + +// Returns ErrInvalidType if value is not of type []byte. +// +// Note: using sync = false. +// see http://godoc.org/github.com/syndtr/goleveldb/leveldb/opt#WriteOptions +func (d *Datastore) Put(key ds.Key, value interface{}) (err error) { + val, ok := value.([]byte) + if !ok { + return ds.ErrInvalidType + } + return d.DB.Put(key.Bytes(), val, nil) +} + +func (d *Datastore) Get(key ds.Key) (value interface{}, err error) { + val, err := d.DB.Get(key.Bytes(), nil) + if err != nil { + if err == leveldb.ErrNotFound { + return nil, ds.ErrNotFound + } + return nil, err + } + return val, nil +} + +func (d *Datastore) Has(key ds.Key) (exists bool, err error) { + return ds.GetBackedHas(d, key) +} + +func (d *Datastore) Delete(key ds.Key) (err error) { + err = d.DB.Delete(key.Bytes(), nil) + if err == leveldb.ErrNotFound { + return ds.ErrNotFound + } + return err +} + +func (d *Datastore) KeyList() ([]ds.Key, error) { + i := d.DB.NewIterator(nil, nil) + var keys []ds.Key + for ; i.Valid(); i.Next() { + keys = append(keys, ds.NewKey(string(i.Key()))) + } + return keys, nil +} + +// LevelDB needs to be closed. 
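The leveldb-backed datastore above only accepts `[]byte` values (anything else returns `ds.ErrInvalidType`) and, as the comment at the end notes, the handle must be closed. A hedged usage sketch — the on-disk path is illustrative only:

```go
package main

import (
	"fmt"
	"log"

	ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
	lds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb"
)

func main() {
	// nil Options is accepted; goleveldb then falls back to its defaults.
	d, err := lds.NewDatastore("/tmp/ds-example", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer d.Close() // LevelDB handles must be closed

	key := ds.NewKey("/block/abc123")
	// Non-[]byte values return ds.ErrInvalidType.
	if err := d.Put(key, []byte("value")); err != nil {
		log.Fatal(err)
	}

	v, err := d.Get(key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", v.([]byte))
}
```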
+func (d *Datastore) Close() (err error) { + return d.DB.Close() +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-base58/LICENSE b/Godeps/_workspace/src/github.com/jbenet/go-base58/LICENSE new file mode 100644 index 000000000..0d760cbb4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-base58/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2013 Conformal Systems LLC. + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/jbenet/go-base58/README.md b/Godeps/_workspace/src/github.com/jbenet/go-base58/README.md new file mode 100644 index 000000000..ece243341 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-base58/README.md @@ -0,0 +1,66 @@ +# go-base58 + +I extracted this package from https://github.com/conformal/btcutil to provide a simple base58 package that +- defaults to base58-check (btc) +- and allows using different alphabets. + +## Usage + +```go +package main + +import ( + "fmt" + b58 "github.com/jbenet/go-base58" +) + +func main() { + buf := []byte{255, 254, 253, 252} + fmt.Printf("buffer: %v\n", buf) + + str := b58.Encode(buf) + fmt.Printf("encoded: %s\n", str) + + buf2 := b58.Decode(str) + fmt.Printf("decoded: %v\n", buf2) +} +``` + +### Another alphabet + +```go +package main + +import ( + "fmt" + b58 "github.com/jbenet/go-base58" +) + +const BogusAlphabet = "ZYXWVUTSRQPNMLKJHGFEDCBAzyxwvutsrqponmkjihgfedcba987654321" + + +func encdec(alphabet string) { + fmt.Printf("using: %s\n", alphabet) + + buf := []byte{255, 254, 253, 252} + fmt.Printf("buffer: %v\n", buf) + + str := b58.EncodeAlphabet(buf, alphabet) + fmt.Printf("encoded: %s\n", str) + + buf2 := b58.DecodeAlphabet(str, alphabet) + fmt.Printf("decoded: %v\n\n", buf2) +} + + +func main() { + encdec(b58.BTCAlphabet) + encdec(b58.FlickrAlphabet) + encdec(BogusAlphabet) +} +``` + + +## License + +Package base58 (and the original btcutil) are licensed under the ISC License. diff --git a/Godeps/_workspace/src/github.com/jbenet/go-base58/base58.go b/Godeps/_workspace/src/github.com/jbenet/go-base58/base58.go new file mode 100644 index 000000000..ad91df54a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-base58/base58.go @@ -0,0 +1,90 @@ +// Copyright (c) 2013-2014 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. +// Modified by Juan Benet (juan@benet.ai) + +package base58 + +import ( + "math/big" + "strings" +) + +// alphabet is the modified base58 alphabet used by Bitcoin. 
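One decoder behavior worth calling out, visible in `DecodeAlphabet` just below and pinned by the `invalidStringTests` table in `base58_test.go`: input containing a character outside the alphabet does not produce an error, it yields an empty slice, so callers must length-check the result themselves. A small sketch:

```go
package main

import (
	"fmt"

	b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58"
)

func main() {
	// '0' is not in the BTC alphabet, so Decode returns an empty slice
	// rather than an error.
	if out := b58.Decode("3mJr0"); len(out) == 0 {
		fmt.Println("not valid base58")
	}
}
```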
+const BTCAlphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" +const FlickrAlphabet = "123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ" + +var bigRadix = big.NewInt(58) +var bigZero = big.NewInt(0) + +// Decode decodes a modified base58 string to a byte slice, using BTCAlphabet +func Decode(b string) []byte { + return DecodeAlphabet(b, BTCAlphabet) +} + +// Encode encodes a byte slice to a modified base58 string, using BTCAlphabet +func Encode(b []byte) string { + return EncodeAlphabet(b, BTCAlphabet) +} + +// DecodeAlphabet decodes a modified base58 string to a byte slice, using alphabet. +func DecodeAlphabet(b, alphabet string) []byte { + answer := big.NewInt(0) + j := big.NewInt(1) + + for i := len(b) - 1; i >= 0; i-- { + tmp := strings.IndexAny(alphabet, string(b[i])) + if tmp == -1 { + return []byte("") + } + idx := big.NewInt(int64(tmp)) + tmp1 := big.NewInt(0) + tmp1.Mul(j, idx) + + answer.Add(answer, tmp1) + j.Mul(j, bigRadix) + } + + tmpval := answer.Bytes() + + var numZeros int + for numZeros = 0; numZeros < len(b); numZeros++ { + if b[numZeros] != alphabet[0] { + break + } + } + flen := numZeros + len(tmpval) + val := make([]byte, flen, flen) + copy(val[numZeros:], tmpval) + + return val +} + +// Encode encodes a byte slice to a modified base58 string, using alphabet +func EncodeAlphabet(b []byte, alphabet string) string { + x := new(big.Int) + x.SetBytes(b) + + answer := make([]byte, 0, len(b)*136/100) + for x.Cmp(bigZero) > 0 { + mod := new(big.Int) + x.DivMod(x, bigRadix, mod) + answer = append(answer, alphabet[mod.Int64()]) + } + + // leading zero bytes + for _, i := range b { + if i != 0 { + break + } + answer = append(answer, alphabet[0]) + } + + // reverse + alen := len(answer) + for i := 0; i < alen/2; i++ { + answer[i], answer[alen-1-i] = answer[alen-1-i], answer[i] + } + + return string(answer) +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-base58/base58_test.go b/Godeps/_workspace/src/github.com/jbenet/go-base58/base58_test.go new file mode 100644 index 000000000..516781bb2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-base58/base58_test.go @@ -0,0 +1,96 @@ +// Copyright (c) 2013-2014 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package base58 + +import ( + "bytes" + "encoding/hex" + "testing" +) + +var stringTests = []struct { + in string + out string +}{ + {"", ""}, + {" ", "Z"}, + {"-", "n"}, + {"0", "q"}, + {"1", "r"}, + {"-1", "4SU"}, + {"11", "4k8"}, + {"abc", "ZiCa"}, + {"1234598760", "3mJr7AoUXx2Wqd"}, + {"abcdefghijklmnopqrstuvwxyz", "3yxU3u1igY8WkgtjK92fbJQCd4BZiiT1v25f"}, + {"00000000000000000000000000000000000000000000000000000000000000", "3sN2THZeE9Eh9eYrwkvZqNstbHGvrxSAM7gXUXvyFQP8XvQLUqNCS27icwUeDT7ckHm4FUHM2mTVh1vbLmk7y"}, +} + +var invalidStringTests = []struct { + in string + out string +}{ + {"0", ""}, + {"O", ""}, + {"I", ""}, + {"l", ""}, + {"3mJr0", ""}, + {"O3yxU", ""}, + {"3sNI", ""}, + {"4kl8", ""}, + {"0OIl", ""}, + {"!@#$%^&*()-_=+~`", ""}, +} + +var hexTests = []struct { + in string + out string +}{ + {"61", "2g"}, + {"626262", "a3gV"}, + {"636363", "aPEr"}, + {"73696d706c792061206c6f6e6720737472696e67", "2cFupjhnEsSn59qHXstmK2ffpLv2"}, + {"00eb15231dfceb60925886b67d065299925915aeb172c06647", "1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L"}, + {"516b6fcd0f", "ABnLTmg"}, + {"bf4f89001e670274dd", "3SEo3LWLoPntC"}, + {"572e4794", "3EFU7m"}, + {"ecac89cad93923c02321", "EJDM8drfXA6uyA"}, + {"10c8511e", "Rt5zm"}, + {"00000000000000000000", "1111111111"}, +} + +func TestBase58(t *testing.T) { + // Base58Encode tests + for x, test := range stringTests { + tmp := []byte(test.in) + if res := Encode(tmp); res != test.out { + t.Errorf("Base58Encode test #%d failed: got: %s want: %s", + x, res, test.out) + continue + } + } + + // Base58Decode tests + for x, test := range hexTests { + b, err := hex.DecodeString(test.in) + if err != nil { + t.Errorf("hex.DecodeString failed failed #%d: got: %s", x, test.in) + continue + } + if res := Decode(test.out); bytes.Equal(res, b) != true { + t.Errorf("Base58Decode test #%d failed: got: %q want: %q", + x, res, test.in) + continue + } + } + + // Base58Decode with invalid input + for x, test := range invalidStringTests { + if res := Decode(test.in); string(res) != test.out { + t.Errorf("Base58Decode invalidString test #%d failed: got: %q want: %q", + x, res, test.out) + continue + } + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-base58/doc.go b/Godeps/_workspace/src/github.com/jbenet/go-base58/doc.go new file mode 100644 index 000000000..315c6107d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-base58/doc.go @@ -0,0 +1,20 @@ +// Copyright (c) 2013-2014 Conformal Systems LLC. +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* +Package base58 provides base58-check encoding. +The alphabet is modifyiable for + +Base58 Usage + +To decode a base58 string: + + rawData := base58.Base58Decode(encodedData) + +Similarly, to encode the same data: + + encodedData := base58.Base58Encode(rawData) + +*/ +package base58 diff --git a/Godeps/_workspace/src/github.com/jbenet/go-msgio/README.md b/Godeps/_workspace/src/github.com/jbenet/go-msgio/README.md new file mode 100644 index 000000000..2cab8d24c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-msgio/README.md @@ -0,0 +1,78 @@ +# go-msgio - Message IO + +This is a simple package that helps read and write length-delimited slices. It's helpful for building wire protocols. + +## Usage + +### Reading + +```go +import "github.com/jbenet/msgio" +rdr := ... 
// some reader from a wire +mrdr := msgio.NewReader(rdr) + +for { + msg, err := mrdr.ReadMsg() + if err != nil { + return err + } + + doSomething(msg) +} +``` + +### Writing + +```go +import "github.com/jbenet/msgio" +wtr := genReader() +mwtr := msgio.NewWriter(wtr) + +for { + msg := genMessage() + err := mwtr.WriteMsg(msg) + if err != nil { + return err + } +} +``` + +### Duplex + +```go +import "github.com/jbenet/msgio" +rw := genReadWriter() +mrw := msgio.NewReadWriter(rw) + +for { + msg, err := mrdr.ReadMsg() + if err != nil { + return err + } + + // echo it back :) + err = mwtr.WriteMsg(msg) + if err != nil { + return err + } +} +``` + +### Channels + +```go +import "github.com/jbenet/msgio" +rw := genReadWriter() +rch := msgio.NewReadChannel(rw) +wch := msgio.NewWriteChannel(rw) + +for { + msg, err := <-rch + if err != nil { + return err + } + + // echo it back :) + wch<- rw +} +``` diff --git a/Godeps/_workspace/src/github.com/jbenet/go-msgio/chan.go b/Godeps/_workspace/src/github.com/jbenet/go-msgio/chan.go new file mode 100644 index 000000000..4d5af5b8c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-msgio/chan.go @@ -0,0 +1,85 @@ +package msgio + +import ( + "io" +) + +type Chan struct { + Buffers [][]byte + MsgChan chan []byte + ErrChan chan error + CloseChan chan bool +} + +func NewChan(chanSize int) *Chan { + return &Chan{ + MsgChan: make(chan []byte, chanSize), + ErrChan: make(chan error, 1), + CloseChan: make(chan bool, 2), + } +} + +func (s *Chan) ReadFrom(r io.Reader, maxMsgLen int) { + // new buffer per message + // if bottleneck, cycle around a set of buffers + mr := NewReader(r) +Loop: + for { + buf := make([]byte, maxMsgLen) + l, err := mr.ReadMsg(buf) + if err != nil { + if err == io.EOF { + break Loop // done + } + + // unexpected error. tell the client. + s.ErrChan <- err + break Loop + } + + select { + case <-s.CloseChan: + break Loop // told we're done + case s.MsgChan <- buf[:l]: + // ok seems fine. send it away + } + } + + close(s.MsgChan) + // signal we're done + s.CloseChan <- true +} + +func (s *Chan) WriteTo(w io.Writer) { + // new buffer per message + // if bottleneck, cycle around a set of buffers + mw := NewWriter(w) +Loop: + for { + select { + case <-s.CloseChan: + break Loop // told we're done + + case msg, ok := <-s.MsgChan: + if !ok { // chan closed + break Loop + } + + if err := mw.WriteMsg(msg); err != nil { + if err != io.EOF { + // unexpected error. tell the client. 
+ s.ErrChan <- err + } + + break Loop + } + } + } + + // signal we're done + s.CloseChan <- true +} + +func (s *Chan) Close() { + s.CloseChan <- true +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-msgio/chan_test.go b/Godeps/_workspace/src/github.com/jbenet/go-msgio/chan_test.go new file mode 100644 index 000000000..043baa2bb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-msgio/chan_test.go @@ -0,0 +1,110 @@ +package msgio + +import ( + "bytes" + randbuf "github.com/jbenet/go-randbuf" + "io" + "math/rand" + "testing" + "time" +) + +func TestReadChan(t *testing.T) { + buf := bytes.NewBuffer(nil) + writer := NewWriter(buf) + rchan := NewChan(10) + msgs := [1000][]byte{} + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := range msgs { + msgs[i] = randbuf.RandBuf(r, r.Intn(1000)) + err := writer.WriteMsg(msgs[i]) + if err != nil { + t.Fatal(err) + } + } + + if err := writer.Close(); err != nil { + t.Fatal(err) + } + + go rchan.ReadFrom(buf, 1000) + defer rchan.Close() + +Loop: + for i := 0; ; i++ { + select { + case err := <-rchan.ErrChan: + if err != nil { + t.Fatal("unexpected error", err) + } + + case msg2, ok := <-rchan.MsgChan: + if !ok { + if i < len(msg2) { + t.Error("failed to read all messages", len(msgs), i) + } + break Loop + } + + msg1 := msgs[i] + if !bytes.Equal(msg1, msg2) { + t.Fatal("message retrieved not equal\n", msg1, "\n\n", msg2) + } + } + } +} + +func TestWriteChan(t *testing.T) { + buf := bytes.NewBuffer(nil) + reader := NewReader(buf) + wchan := NewChan(10) + msgs := [1000][]byte{} + + go wchan.WriteTo(buf) + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := range msgs { + msgs[i] = randbuf.RandBuf(r, r.Intn(1000)) + + select { + case err := <-wchan.ErrChan: + if err != nil { + t.Fatal("unexpected error", err) + } + + case wchan.MsgChan <- msgs[i]: + } + } + + // tell chan we're done. 
+ close(wchan.MsgChan) + // wait for writing to end + <-wchan.CloseChan + + defer wchan.Close() + + for i := 0; ; i++ { + msg2 := make([]byte, 1000) + n, err := reader.ReadMsg(msg2) + if err != nil { + if err == io.EOF { + if i < len(msg2) { + t.Error("failed to read all messages", len(msgs), i) + } + break + } + t.Error("unexpected error", err) + } + + msg1 := msgs[i] + msg2 = msg2[:n] + if !bytes.Equal(msg1, msg2) { + t.Fatal("message retrieved not equal\n", msg1, "\n\n", msg2) + } + } + + if err := reader.Close(); err != nil { + t.Error(err) + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-msgio/msgio.go b/Godeps/_workspace/src/github.com/jbenet/go-msgio/msgio.go new file mode 100644 index 000000000..1d3a3d056 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-msgio/msgio.go @@ -0,0 +1,111 @@ +package msgio + +import ( + "encoding/binary" + "io" +) + +var NBO = binary.BigEndian + +type Writer interface { + WriteMsg([]byte) error +} + +type WriteCloser interface { + Writer + io.Closer +} + +type Reader interface { + ReadMsg([]byte) (int, error) +} + +type ReadCloser interface { + Reader + io.Closer +} + +type ReadWriter interface { + Reader + Writer +} + +type ReadWriteCloser interface { + Reader + Writer + io.Closer +} + +type Writer_ struct { + W io.Writer +} + +func NewWriter(w io.Writer) WriteCloser { + return &Writer_{w} +} + +func (s *Writer_) WriteMsg(msg []byte) (err error) { + length := uint32(len(msg)) + if err := binary.Write(s.W, NBO, &length); err != nil { + return err + } + _, err = s.W.Write(msg) + return err +} + +func (s *Writer_) Close() error { + if c, ok := s.W.(io.Closer); ok { + return c.Close() + } + return nil +} + +type Reader_ struct { + R io.Reader + lbuf []byte +} + +func NewReader(r io.Reader) ReadCloser { + return &Reader_{r, make([]byte, 4)} +} + +func (s *Reader_) ReadMsg(msg []byte) (int, error) { + if _, err := io.ReadFull(s.R, s.lbuf); err != nil { + return 0, err + } + length := int(NBO.Uint32(s.lbuf)) + if length < 0 || length > len(msg) { + return 0, io.ErrShortBuffer + } + _, err := io.ReadFull(s.R, msg[:length]) + return length, err +} + +func (s *Reader_) Close() error { + if c, ok := s.R.(io.Closer); ok { + return c.Close() + } + return nil +} + +type ReadWriter_ struct { + Reader + Writer +} + +func NewReadWriter(rw io.ReadWriter) ReadWriter { + return &ReadWriter_{ + Reader: NewReader(rw), + Writer: NewWriter(rw), + } +} + +func (rw *ReadWriter_) Close() error { + if w, ok := rw.Writer.(WriteCloser); ok { + return w.Close() + } + if r, ok := rw.Reader.(ReadCloser); ok { + return r.Close() + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-msgio/msgio_test.go b/Godeps/_workspace/src/github.com/jbenet/go-msgio/msgio_test.go new file mode 100644 index 000000000..61cb7f85e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-msgio/msgio_test.go @@ -0,0 +1,54 @@ +package msgio + +import ( + "bytes" + randbuf "github.com/jbenet/go-randbuf" + "io" + "math/rand" + "testing" + "time" +) + +func TestReaderWriter(t *testing.T) { + buf := bytes.NewBuffer(nil) + writer := NewWriter(buf) + reader := NewReader(buf) + msgs := [1000][]byte{} + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := range msgs { + msgs[i] = randbuf.RandBuf(r, r.Intn(1000)) + err := writer.WriteMsg(msgs[i]) + if err != nil { + t.Fatal(err) + } + } + + if err := writer.Close(); err != nil { + t.Fatal(err) + } + + for i := 0; ; i++ { + msg2 := make([]byte, 1000) + n, err := reader.ReadMsg(msg2) + if err != nil { 
+ if err == io.EOF { + if i < len(msg2) { + t.Error("failed to read all messages", len(msgs), i) + } + break + } + t.Error("unexpected error", err) + } + + msg1 := msgs[i] + msg2 = msg2[:n] + if !bytes.Equal(msg1, msg2) { + t.Fatal("message retrieved not equal\n", msg1, "\n\n", msg2) + } + } + + if err := reader.Close(); err != nil { + t.Error(err) + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/README.md b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/README.md new file mode 100644 index 000000000..b63ffa2da --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/README.md @@ -0,0 +1,66 @@ +# go-multiaddr + +[multiaddr](https://github.com/jbenet/multiaddr) implementation in Go. + +## Example + +### Simple + +```go +import "github.com/jbenet/go-multiaddr" + +m := multiaddr.NewMultiaddr("/ip4/127.0.0.1/udp/1234") +// +m.buffer +// +m.String() +// /ip4/127.0.0.1/udp/1234 + +// construct with Buffer +m = multiaddr.Multiaddr{ Bytes: m.Bytes } +// +``` + +### Protocols + +```go +// get the multiaddr protocol description objects +addr.Protocols() +// []*Protocol{ +// &Protocol{ Code: 4, Name: 'ip4', Size: 32}, +// &Protocol{ Code: 17, Name: 'udp', Size: 16}, +// } +``` + +### Other formats + +```go +// handles the stupid url version too +m = multiaddr.NewUrl("udp4://127.0.0.1:1234") +// +m.Url(buf) +// udp4://127.0.0.1:1234 +``` + +### En/decapsulate + +```go +m.Encapsulate(m.NewMultiaddr("/sctp/5678")) +// +m.Decapsulate(m.NewMultiaddr("/udp")) // up to + inc last occurrence of subaddr +// +``` + +### Tunneling + +Multiaddr allows expressing tunnels very nicely. + +```js +printer := multiaddr.NewMultiaddr("/ip4/192.168.0.13/tcp/80") +proxy := multiaddr.NewMultiaddr("/ip4/10.20.30.40/tcp/443") +printerOverProxy := proxy.Encapsulate(printer) +// + +proxyAgain := printerOverProxy.Decapsulate(multiaddr.NewMultiaddr("/ip4")) +// +``` diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/codec.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/codec.go new file mode 100644 index 000000000..ca0400a99 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/codec.go @@ -0,0 +1,96 @@ +package multiaddr + +import ( + "encoding/binary" + "fmt" + "net" + "strconv" + "strings" +) + +func StringToBytes(s string) ([]byte, error) { + b := []byte{} + sp := strings.Split(s, "/") + + // consume first empty elem + sp = sp[1:] + + for len(sp) > 0 { + p := ProtocolWithName(sp[0]) + if p == nil { + return nil, fmt.Errorf("no protocol with name %s", sp[0]) + } + b = append(b, byte(p.Code)) + + a := AddressStringToBytes(p, sp[1]) + b = append(b, a...) + + sp = sp[2:] + } + return b, nil +} + +func BytesToString(b []byte) (ret string, err error) { + // panic handler, in case we try accessing bytes incorrectly. 
+ defer func() { + if e := recover(); e != nil { + ret = "" + err = e.(error) + } + }() + + s := "" + + for len(b) > 0 { + p := ProtocolWithCode(int(b[0])) + if p == nil { + return "", fmt.Errorf("no protocol with code %d", b[0]) + } + s = strings.Join([]string{s, "/", p.Name}, "") + b = b[1:] + + a := AddressBytesToString(p, b[:(p.Size/8)]) + if len(a) > 0 { + s = strings.Join([]string{s, "/", a}, "") + } + b = b[(p.Size / 8):] + } + + return s, nil +} + +func AddressStringToBytes(p *Protocol, s string) []byte { + switch p.Code { + + // ipv4,6 + case 4, 41: + return net.ParseIP(s).To4() + + // tcp udp dccp sctp + case 6, 17, 33, 132: + b := make([]byte, 2) + i, err := strconv.Atoi(s) + if err == nil { + binary.BigEndian.PutUint16(b, uint16(i)) + } + return b + } + + return []byte{} +} + +func AddressBytesToString(p *Protocol, b []byte) string { + switch p.Code { + + // ipv4,6 + case 4, 41: + return net.IP(b).String() + + // tcp udp dccp sctp + case 6, 17, 33, 132: + i := binary.BigEndian.Uint16(b) + return strconv.Itoa(int(i)) + } + + return "" +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/index.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/index.go new file mode 100644 index 000000000..413a971f5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/index.go @@ -0,0 +1,102 @@ +package multiaddr + +import ( + "fmt" + "strings" +) + +type Multiaddr struct { + Bytes []byte +} + +func NewMultiaddr(s string) (*Multiaddr, error) { + b, err := StringToBytes(s) + if err != nil { + return nil, err + } + return &Multiaddr{Bytes: b}, nil +} + +func (m *Multiaddr) String() (string, error) { + return BytesToString(m.Bytes) +} + +func (m *Multiaddr) Protocols() (ret []*Protocol, err error) { + + // panic handler, in case we try accessing bytes incorrectly. + defer func() { + if e := recover(); e != nil { + ret = nil + err = e.(error) + } + }() + + ps := []*Protocol{} + b := m.Bytes[:] + for len(b) > 0 { + p := ProtocolWithCode(int(b[0])) + if p == nil { + return nil, fmt.Errorf("no protocol with code %d", b[0]) + } + ps = append(ps, p) + b = b[1+(p.Size/8):] + } + return ps, nil +} + +func (m *Multiaddr) Encapsulate(o *Multiaddr) *Multiaddr { + b := make([]byte, len(m.Bytes)+len(o.Bytes)) + b = append(m.Bytes, o.Bytes...) 
+ return &Multiaddr{Bytes: b} +} + +func (m *Multiaddr) Decapsulate(o *Multiaddr) (*Multiaddr, error) { + s1, err := m.String() + if err != nil { + return nil, err + } + + s2, err := o.String() + if err != nil { + return nil, err + } + + i := strings.LastIndex(s1, s2) + if i < 0 { + return nil, fmt.Errorf("%s not contained in %s", s2, s1) + } + return NewMultiaddr(s1[:i]) +} + +func (m *Multiaddr) DialArgs() (string, string, error) { + if !m.IsThinWaist() { + return "", "", fmt.Errorf("%s is not a 'thin waist' address.", m) + } + + str, err := m.String() + if err != nil { + return "", "", err + } + + parts := strings.Split(str, "/")[1:] + network := parts[2] + host := strings.Join([]string{parts[1], parts[3]}, ":") + return network, host, nil +} + +func (m *Multiaddr) IsThinWaist() bool { + p, err := m.Protocols() + if err != nil { + return false + } + + if p[0].Code != P_IP4 && p[0].Code != P_IP6 { + return false + } + + if p[1].Code != P_TCP && p[1].Code != P_UDP { + return false + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go new file mode 100644 index 000000000..976e09556 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go @@ -0,0 +1,129 @@ +package multiaddr + +import ( + "bytes" + "encoding/hex" + "testing" +) + +func TestStringToBytes(t *testing.T) { + + testString := func(s string, h string) { + b1, err := hex.DecodeString(h) + if err != nil { + t.Error("failed to decode hex", h) + } + + b2, err := StringToBytes(s) + if err != nil { + t.Error("failed to convert", s) + } + + if !bytes.Equal(b1, b2) { + t.Error("failed to convert", s, "to", b1, "got", b2) + } + } + + testString("/ip4/127.0.0.1/udp/1234", "047f0000011104d2") +} + +func TestBytesToString(t *testing.T) { + + testString := func(s1 string, h string) { + b, err := hex.DecodeString(h) + if err != nil { + t.Error("failed to decode hex", h) + } + + s2, err := BytesToString(b) + if err != nil { + t.Error("failed to convert", b) + } + + if s1 != s2 { + t.Error("failed to convert", b, "to", s1, "got", s2) + } + } + + testString("/ip4/127.0.0.1/udp/1234", "047f0000011104d2") +} + +func TestProtocols(t *testing.T) { + m, err := NewMultiaddr("/ip4/127.0.0.1/udp/1234") + if err != nil { + t.Error("failed to construct", "/ip4/127.0.0.1/udp/1234") + } + + ps, err := m.Protocols() + if err != nil { + t.Error("failed to get protocols", "/ip4/127.0.0.1/udp/1234") + } + + if ps[0] != ProtocolWithName("ip4") { + t.Error(ps[0], ProtocolWithName("ip4")) + t.Error("failed to get ip4 protocol") + } + + if ps[1] != ProtocolWithName("udp") { + t.Error(ps[1], ProtocolWithName("udp")) + t.Error("failed to get udp protocol") + } + +} + +func TestEncapsulate(t *testing.T) { + m, err := NewMultiaddr("/ip4/127.0.0.1/udp/1234") + if err != nil { + t.Error(err) + } + + m2, err := NewMultiaddr("/udp/5678") + if err != nil { + t.Error(err) + } + + b := m.Encapsulate(m2) + if s, _ := b.String(); s != "/ip4/127.0.0.1/udp/1234/udp/5678" { + t.Error("encapsulate /ip4/127.0.0.1/udp/1234/udp/5678 failed.", s) + } + + m3, _ := NewMultiaddr("/udp/5678") + c, err := b.Decapsulate(m3) + if err != nil { + t.Error("decapsulate /udp failed.", err) + } + + if s, _ := c.String(); s != "/ip4/127.0.0.1/udp/1234" { + t.Error("decapsulate /udp failed.", "/ip4/127.0.0.1/udp/1234", s) + } + + m4, _ := NewMultiaddr("/ip4/127.0.0.1") + d, err := c.Decapsulate(m4) + if err != nil { + t.Error("decapsulate /ip4 
failed.", err) + } + + if s, _ := d.String(); s != "" { + t.Error("decapsulate /ip4 failed.", "/", s) + } +} + +func TestDialArgs(t *testing.T) { + m, err := NewMultiaddr("/ip4/127.0.0.1/udp/1234") + if err != nil { + t.Fatal("failed to construct", "/ip4/127.0.0.1/udp/1234") + } + + nw, host, err := m.DialArgs() + if err != nil { + t.Fatal("failed to get dial args", "/ip4/127.0.0.1/udp/1234", err) + } + + if nw != "udp" { + t.Error("failed to get udp network Dial Arg") + } + + if host != "127.0.0.1:1234" { + t.Error("failed to get host:port Dial Arg") + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/protocols.csv b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/protocols.csv new file mode 100644 index 000000000..62bc5c217 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/protocols.csv @@ -0,0 +1,9 @@ +code size name +4 32 ip4 +6 16 tcp +17 16 udp +33 16 dccp +41 128 ip6 +132 16 sctp +480 0 http +443 0 https diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/protocols.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/protocols.go new file mode 100644 index 000000000..e08d01f07 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/protocols.go @@ -0,0 +1,51 @@ +package multiaddr + +type Protocol struct { + Code int + Size int + Name string +} + +// replicating table here to: +// 1. avoid parsing the csv +// 2. ensuring errors in the csv don't screw up code. +// 3. changing a number has to happen in two places. + +const ( + P_IP4 = 4 + P_TCP = 6 + P_UDP = 17 + P_DCCP = 33 + P_IP6 = 41 + P_SCTP = 132 +) + +var Protocols = []*Protocol{ + &Protocol{P_IP4, 32, "ip4"}, + &Protocol{P_TCP, 16, "tcp"}, + &Protocol{P_UDP, 16, "udp"}, + &Protocol{P_DCCP, 16, "dccp"}, + &Protocol{P_IP6, 128, "ip6"}, + // these require varint: + &Protocol{P_SCTP, 16, "sctp"}, + // {480, 0, "http"}, + // {443, 0, "https"}, +} + +func ProtocolWithName(s string) *Protocol { + for _, p := range Protocols { + if p.Name == s { + return p + } + } + return nil +} + +func ProtocolWithCode(c int) *Protocol { + for _, p := range Protocols { + if p.Code == c { + return p + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multihash/README.md b/Godeps/_workspace/src/github.com/jbenet/go-multihash/README.md new file mode 100644 index 000000000..724b4d515 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multihash/README.md @@ -0,0 +1,43 @@ +# go-multihash + +[multihash](//github.com/jbenet/multihash) implementation in Go. + +## Example + +```go +package main + +import ( + "encoding/hex" + "fmt" + "github.com/jbenet/go-multihash" +) + +func main() { + // ignores errors for simplicity. + // don't do that at home. 
+ + buf, _ := hex.DecodeString("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") + mhbuf, _ := multihash.EncodeName(buf, "sha1"); + mhhex := hex.EncodeToString(mhbuf) + fmt.Printf("hex: %v\n", mhhex); + + o, _ := multihash.Decode(mhbuf); + mhhex = hex.EncodeToString(o.Digest); + fmt.Printf("obj: %v 0x%x %d %s\n", o.Name, o.Code, o.Length, mhhex); +} +``` + +Run [test/foo.go](test/foo.go) + +``` +> cd test/ +> go build +> ./test +hex: 11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 +obj: sha1 0x11 20 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 +``` + +## License + +MIT diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multihash/multihash.go b/Godeps/_workspace/src/github.com/jbenet/go-multihash/multihash.go new file mode 100644 index 000000000..a56bc162b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multihash/multihash.go @@ -0,0 +1,161 @@ +package multihash + +import ( + "encoding/hex" + "fmt" + b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58" +) + +// constants +const SHA1 = 0x11 +const SHA2_256 = 0x12 +const SHA2_512 = 0x13 +const SHA3 = 0x14 +const BLAKE2B = 0x40 +const BLAKE2S = 0x41 + +var Names = map[string]int{ + "sha1": 0x11, + "sha2-256": 0x12, + "sha2-512": 0x13, + "sha3": 0x14, + "blake2b": 0x40, + "blake2s": 0x41, +} + +var Codes = map[int]string{ + 0x11: "sha1", + 0x12: "sha2-256", + 0x13: "sha2-512", + 0x14: "sha3", + 0x40: "blake2b", + 0x41: "blake2s", +} + +var DefaultLengths = map[int]int{ + 0x11: 20, + 0x12: 32, + 0x13: 64, + 0x14: 64, + 0x40: 64, + 0x41: 32, +} + +type DecodedMultihash struct { + Code int + Name string + Length int + Digest []byte +} + +type Multihash []byte + +func (m Multihash) HexString() string { + return hex.EncodeToString([]byte(m)) +} + +func FromHexString(s string) (Multihash, error) { + b, err := hex.DecodeString(s) + if err != nil { + return Multihash{}, err + } + + return Cast(b) +} + +func (m Multihash) B58String() string { + return b58.Encode([]byte(m)) +} + +func FromB58String(s string) (m Multihash, err error) { + // panic handler, in case we try accessing bytes incorrectly. + defer func() { + if e := recover(); e != nil { + m = Multihash{} + err = e.(error) + } + }() + + //b58 smells like it can panic... + b := b58.Decode(s) + return Cast(b) +} + +func Cast(buf []byte) (Multihash, error) { + dm, err := Decode(buf) + if err != nil { + return Multihash{}, err + } + + if !ValidCode(dm.Code) { + return Multihash{}, fmt.Errorf("unknown multihash code") + } + + return Multihash(buf), nil +} + +// Decodes a hash from the given Multihash. +func Decode(buf []byte) (*DecodedMultihash, error) { + + if len(buf) < 3 { + return nil, fmt.Errorf("multihash too short. must be > 3 bytes.") + } + + if len(buf) > 129 { + return nil, fmt.Errorf("multihash too long. must be < 129 bytes.") + } + + dm := &DecodedMultihash{ + Code: int(uint8(buf[0])), + Name: Codes[int(uint8(buf[0]))], + Length: int(uint8(buf[1])), + Digest: buf[2:], + } + + if len(dm.Digest) != dm.Length { + return nil, fmt.Errorf("multihash length inconsistent: %v", dm) + } + + return dm, nil +} + +// Encodes a hash digest along with the specified function code. +// Note: the length is derived from the length of the digest itself. +func Encode(buf []byte, code int) ([]byte, error) { + + if !ValidCode(code) { + return nil, fmt.Errorf("unknown multihash code") + } + + if len(buf) > 127 { + m := "multihash does not yet support digests longer than 127 bytes." 
+ return nil, fmt.Errorf(m) + } + + pre := make([]byte, 2) + pre[0] = byte(uint8(code)) + pre[1] = byte(uint8(len(buf))) + return append(pre, buf...), nil +} + +func EncodeName(buf []byte, name string) ([]byte, error) { + return Encode(buf, Names[name]) +} + +// Checks whether a multihash code is valid. +func ValidCode(code int) bool { + if AppCode(code) { + return true + } + + if _, ok := Codes[code]; ok { + return true + } + + return false +} + +// Checks whether a multihash code is part of the App range. +func AppCode(code int) bool { + return code >= 0 && code < 0x10 +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multihash/multihash_test.go b/Godeps/_workspace/src/github.com/jbenet/go-multihash/multihash_test.go new file mode 100644 index 000000000..fac9f721b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multihash/multihash_test.go @@ -0,0 +1,192 @@ +package multihash + +import ( + "bytes" + "encoding/hex" + "testing" +) + +// maybe silly, but makes it so changing +// the table accidentally has to happen twice. +var tCodes = map[int]string{ + 0x11: "sha1", + 0x12: "sha2-256", + 0x13: "sha2-512", + 0x14: "sha3", + 0x40: "blake2b", + 0x41: "blake2s", +} + +type TestCase struct { + hex string + code int + name string +} + +var testCases = []TestCase{ + TestCase{"0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33", 0x11, "sha1"}, + TestCase{"0beec7b5", 0x11, "sha1"}, + TestCase{"2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", 0x12, "sha2-256"}, + TestCase{"2c26b46b", 0x12, "sha2-256"}, + TestCase{"0beec7b5ea3f0fdbc9", 0x40, "blake2b"}, +} + +func TestEncode(t *testing.T) { + for _, tc := range testCases { + ob, err := hex.DecodeString(tc.hex) + if err != nil { + t.Error(err) + continue + } + + pre := make([]byte, 2) + pre[0] = byte(uint8(tc.code)) + pre[1] = byte(uint8(len(ob))) + nb := append(pre, ob...) + + encC, err := Encode(ob, tc.code) + if err != nil { + t.Error(err) + continue + } + + if !bytes.Equal(encC, nb) { + t.Error("encoded byte mismatch: ", encC, nb) + } + + encN, err := EncodeName(ob, tc.name) + if err != nil { + t.Error(err) + continue + } + + if !bytes.Equal(encN, nb) { + t.Error("encoded byte mismatch: ", encN, nb) + } + } +} + +func TestDecode(t *testing.T) { + for _, tc := range testCases { + ob, err := hex.DecodeString(tc.hex) + if err != nil { + t.Error(err) + continue + } + + pre := make([]byte, 2) + pre[0] = byte(uint8(tc.code)) + pre[1] = byte(uint8(len(ob))) + nb := append(pre, ob...) 
+ + dec, err := Decode(nb) + if err != nil { + t.Error(err) + continue + } + + if dec.Code != tc.code { + t.Error("decoded code mismatch: ", dec.Code, tc.code) + } + + if dec.Name != tc.name { + t.Error("decoded name mismatch: ", dec.Name, tc.name) + } + + if dec.Length != len(ob) { + t.Error("decoded length mismatch: ", dec.Length, len(ob)) + } + + if !bytes.Equal(dec.Digest, ob) { + t.Error("decoded byte mismatch: ", dec.Digest, ob) + } + } +} + +func TestTable(t *testing.T) { + for k, v := range tCodes { + if Codes[k] != v { + t.Error("Table mismatch: ", Codes[k], v) + } + if Names[v] != k { + t.Error("Table mismatch: ", Names[v], k) + } + } +} + +func TestValidCode(t *testing.T) { + for i := 0; i < 0xff; i++ { + _, ok := tCodes[i] + b := AppCode(i) || ok + + if ValidCode(i) != b { + t.Error("ValidCode incorrect for: ", i) + } + } +} + +func TestAppCode(t *testing.T) { + for i := 0; i < 0xff; i++ { + b := i > 0 && i < 0x10 + if AppCode(i) != b { + t.Error("AppCode incorrect for: ", i) + } + } +} + +func TestCast(t *testing.T) { + for _, tc := range testCases { + ob, err := hex.DecodeString(tc.hex) + if err != nil { + t.Error(err) + continue + } + + pre := make([]byte, 2) + pre[0] = byte(uint8(tc.code)) + pre[1] = byte(uint8(len(ob))) + nb := append(pre, ob...) + + if _, err := Cast(nb); err != nil { + t.Error(err) + continue + } + + if _, err = Cast(ob); err == nil { + t.Error("cast failed to detect non-multihash") + continue + } + } +} + +func TestHex(t *testing.T) { + for _, tc := range testCases { + ob, err := hex.DecodeString(tc.hex) + if err != nil { + t.Error(err) + continue + } + + pre := make([]byte, 2) + pre[0] = byte(uint8(tc.code)) + pre[1] = byte(uint8(len(ob))) + nb := append(pre, ob...) + + hs := hex.EncodeToString(nb) + mh, err := FromHexString(hs) + if err != nil { + t.Error(err) + continue + } + + if !bytes.Equal(mh, nb) { + t.Error("FromHexString failed", nb, mh) + continue + } + + if mh.HexString() != hs { + t.Error("Multihash.HexString failed", hs, mh.HexString) + continue + } + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multihash/sum.go b/Godeps/_workspace/src/github.com/jbenet/go-multihash/sum.go new file mode 100644 index 000000000..767be814e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multihash/sum.go @@ -0,0 +1,68 @@ +package multihash + +import ( + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "fmt" + sha3 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.crypto/sha3" +) + +func Sum(data []byte, code int, length int) (Multihash, error) { + m := Multihash{} + err := error(nil) + if !ValidCode(code) { + return m, fmt.Errorf("invalid multihash code %d", code) + } + + var d []byte + switch code { + case SHA1: + d = sumSHA1(data) + case SHA2_256: + d = sumSHA256(data) + case SHA2_512: + d = sumSHA512(data) + case SHA3: + d, err = sumSHA3(data) + default: + return m, fmt.Errorf("Function not implemented. 
Complain to lib maintainer.") + } + + if err != nil { + return m, err + } + + if length < 0 { + var ok bool + length, ok = DefaultLengths[code] + if !ok { + return m, fmt.Errorf("no default length for code %d", code) + } + } + + return Encode(d[0:length], code) +} + +func sumSHA1(data []byte) []byte { + a := sha1.Sum(data) + return a[0:20] +} + +func sumSHA256(data []byte) []byte { + a := sha256.Sum256(data) + return a[0:32] +} + +func sumSHA512(data []byte) []byte { + a := sha512.Sum512(data) + return a[0:64] +} + +func sumSHA3(data []byte) ([]byte, error) { + h := sha3.NewKeccak512() + if _, err := h.Write(data); err != nil { + return nil, err + } + return h.Sum(nil), nil +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multihash/sum_test.go b/Godeps/_workspace/src/github.com/jbenet/go-multihash/sum_test.go new file mode 100644 index 000000000..3431d6f67 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multihash/sum_test.go @@ -0,0 +1,59 @@ +package multihash + +import ( + "bytes" + "testing" +) + +type SumTestCase struct { + code int + length int + input string + hex string +} + +var sumTestCases = []SumTestCase{ + SumTestCase{SHA1, -1, "foo", "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"}, + SumTestCase{SHA1, 10, "foo", "110a0beec7b5ea3f0fdbc95d"}, + SumTestCase{SHA2_256, -1, "foo", "12202c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"}, + SumTestCase{SHA2_256, 16, "foo", "12102c26b46b68ffc68ff99b453c1d304134"}, + SumTestCase{SHA2_512, -1, "foo", "1340f7fbba6e0636f890e56fbbf3283e524c6fa3204ae298382d624741d0dc6638326e282c41be5e4254d8820772c5518a2c5a8c0c7f7eda19594a7eb539453e1ed7"}, + SumTestCase{SHA2_512, 32, "foo", "1320f7fbba6e0636f890e56fbbf3283e524c6fa3204ae298382d624741d0dc663832"}, +} + +func TestSum(t *testing.T) { + + for _, tc := range sumTestCases { + + m1, err := FromHexString(tc.hex) + if err != nil { + t.Error(err) + continue + } + + m2, err := Sum([]byte(tc.input), tc.code, tc.length) + if err != nil { + t.Error(tc.code, "sum failed.", err) + continue + } + + if !bytes.Equal(m1, m2) { + t.Error(tc.code, "sum failed.", m1, m2) + } + + s1 := m1.HexString() + if s1 != tc.hex { + t.Error("hex strings not the same") + } + + s2 := m1.B58String() + m3, err := FromB58String(s2) + if err != nil { + t.Error("failed to decode b58") + } else if !bytes.Equal(m3, m1) { + t.Error("b58 failing bytes") + } else if s2 != m3.B58String() { + t.Error("b58 failing string") + } + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multihash/test/foo.go b/Godeps/_workspace/src/github.com/jbenet/go-multihash/test/foo.go new file mode 100644 index 000000000..21d33d658 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multihash/test/foo.go @@ -0,0 +1,21 @@ +package main + +import ( + "encoding/hex" + "fmt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" +) + +func main() { + // ignores errors for simplicity. + // don't do that at home. 
+ + buf, _ := hex.DecodeString("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") + mhbuf, _ := multihash.EncodeName(buf, "sha1") + mhhex := hex.EncodeToString(mhbuf) + fmt.Printf("hex: %v\n", mhhex) + + o, _ := multihash.Decode(mhbuf) + mhhex = hex.EncodeToString(o.Digest) + fmt.Printf("obj: %v 0x%x %d %s\n", o.Name, o.Code, o.Length, mhhex) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go new file mode 100644 index 000000000..dc7ced697 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go @@ -0,0 +1,216 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "encoding/binary" + "errors" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" +) + +var ( + errBatchTooShort = errors.New("leveldb: batch is too short") + errBatchBadRecord = errors.New("leveldb: bad record in batch") +) + +const kBatchHdrLen = 8 + 4 + +type batchReplay interface { + put(key, value []byte, seq uint64) + delete(key []byte, seq uint64) +} + +// Batch is a write batch. +type Batch struct { + buf []byte + rLen, bLen int + seq uint64 + sync bool +} + +func (b *Batch) grow(n int) { + off := len(b.buf) + if off == 0 { + // include headers + off = kBatchHdrLen + n += off + } + if cap(b.buf)-off >= n { + return + } + buf := make([]byte, 2*cap(b.buf)+n) + copy(buf, b.buf) + b.buf = buf[:off] +} + +func (b *Batch) appendRec(t vType, key, value []byte) { + n := 1 + binary.MaxVarintLen32 + len(key) + if t == tVal { + n += binary.MaxVarintLen32 + len(value) + } + b.grow(n) + off := len(b.buf) + buf := b.buf[:off+n] + buf[off] = byte(t) + off += 1 + off += binary.PutUvarint(buf[off:], uint64(len(key))) + copy(buf[off:], key) + off += len(key) + if t == tVal { + off += binary.PutUvarint(buf[off:], uint64(len(value))) + copy(buf[off:], value) + off += len(value) + } + b.buf = buf[:off] + b.rLen++ + // Include 8-byte ikey header + b.bLen += len(key) + len(value) + 8 +} + +// Put appends 'put operation' of the given key/value pair to the batch. +// It is safe to modify the contents of the argument after Put returns. +func (b *Batch) Put(key, value []byte) { + b.appendRec(tVal, key, value) +} + +// Delete appends 'delete operation' of the given key to the batch. +// It is safe to modify the contents of the argument after Delete returns. +func (b *Batch) Delete(key []byte) { + b.appendRec(tDel, key, nil) +} + +// Reset resets the batch. +func (b *Batch) Reset() { + b.buf = nil + b.seq = 0 + b.rLen = 0 + b.bLen = 0 + b.sync = false +} + +func (b *Batch) init(sync bool) { + b.sync = sync +} + +func (b *Batch) put(key, value []byte, seq uint64) { + if b.rLen == 0 { + b.seq = seq + } + b.Put(key, value) +} + +func (b *Batch) delete(key []byte, seq uint64) { + if b.rLen == 0 { + b.seq = seq + } + b.Delete(key) +} + +func (b *Batch) append(p *Batch) { + if p.rLen > 0 { + b.grow(len(p.buf) - kBatchHdrLen) + b.buf = append(b.buf, p.buf[kBatchHdrLen:]...) 
+ b.rLen += p.rLen + } + if p.sync { + b.sync = true + } +} + +func (b *Batch) len() int { + return b.rLen +} + +func (b *Batch) size() int { + return b.bLen +} + +func (b *Batch) encode() []byte { + b.grow(0) + binary.LittleEndian.PutUint64(b.buf, b.seq) + binary.LittleEndian.PutUint32(b.buf[8:], uint32(b.rLen)) + + return b.buf +} + +func (b *Batch) decode(buf []byte) error { + if len(buf) < kBatchHdrLen { + return errBatchTooShort + } + + b.seq = binary.LittleEndian.Uint64(buf) + b.rLen = int(binary.LittleEndian.Uint32(buf[8:])) + // No need to be precise at this point, it won't be used anyway + b.bLen = len(buf) - kBatchHdrLen + b.buf = buf + + return nil +} + +func (b *Batch) decodeRec(f func(i int, t vType, key, value []byte)) error { + off := kBatchHdrLen + for i := 0; i < b.rLen; i++ { + if off >= len(b.buf) { + return errors.New("leveldb: invalid batch record length") + } + + t := vType(b.buf[off]) + if t > tVal { + return errors.New("leveldb: invalid batch record type in batch") + } + off += 1 + + x, n := binary.Uvarint(b.buf[off:]) + off += n + if n <= 0 || off+int(x) > len(b.buf) { + return errBatchBadRecord + } + key := b.buf[off : off+int(x)] + off += int(x) + + var value []byte + if t == tVal { + x, n := binary.Uvarint(b.buf[off:]) + off += n + if n <= 0 || off+int(x) > len(b.buf) { + return errBatchBadRecord + } + value = b.buf[off : off+int(x)] + off += int(x) + } + + f(i, t, key, value) + } + + return nil +} + +func (b *Batch) replay(to batchReplay) error { + return b.decodeRec(func(i int, t vType, key, value []byte) { + switch t { + case tVal: + to.put(key, value, b.seq+uint64(i)) + case tDel: + to.delete(key, b.seq+uint64(i)) + } + }) +} + +func (b *Batch) memReplay(to *memdb.DB) error { + return b.decodeRec(func(i int, t vType, key, value []byte) { + ikey := newIKey(key, b.seq+uint64(i), t) + to.Put(ikey, value) + }) +} + +func (b *Batch) revertMemReplay(to *memdb.DB) error { + return b.decodeRec(func(i int, t vType, key, value []byte) { + ikey := newIKey(key, b.seq+uint64(i), t) + to.Delete(ikey) + }) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go new file mode 100644 index 000000000..a59a7b6a3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go @@ -0,0 +1,120 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
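The Batch wire format implemented in batch.go above is simple enough to sketch by hand: a 12-byte header (8-byte little-endian sequence number followed by a 4-byte record count) and then, per record, a type byte, a uvarint-prefixed key and, for puts only, a uvarint-prefixed value. The standalone snippet below is an illustrative sketch of that layout using only the standard library, not code from the vendored package; the type codes 1 for a put and 0 for a delete are assumed here, since the real constants (tVal, tDel) are defined elsewhere in the leveldb sources.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const (
		seq      = 10009 // assumed sequence number
		nRecords = 2
		typePut  = 1 // assumed value of tVal
		typeDel  = 0 // assumed value of tDel
	)

	// 12-byte header: 8-byte little-endian sequence + 4-byte record count.
	buf := make([]byte, 12)
	binary.LittleEndian.PutUint64(buf[0:8], uint64(seq))
	binary.LittleEndian.PutUint32(buf[8:12], uint32(nRecords))

	var tmp [binary.MaxVarintLen64]byte
	appendUvarint := func(x uint64) {
		n := binary.PutUvarint(tmp[:], x)
		buf = append(buf, tmp[:n]...)
	}
	appendRec := func(t byte, key, value []byte) {
		buf = append(buf, t)            // record type
		appendUvarint(uint64(len(key))) // key length
		buf = append(buf, key...)       // key bytes
		if t == typePut {               // only puts carry a value
			appendUvarint(uint64(len(value)))
			buf = append(buf, value...)
		}
	}

	appendRec(typePut, []byte("key1"), []byte("value1"))
	appendRec(typeDel, []byte("key1"), nil)

	fmt.Printf("encoded batch: % x\n", buf)
	fmt.Println("seq:", binary.LittleEndian.Uint64(buf[0:8]),
		"records:", binary.LittleEndian.Uint32(buf[8:12]))
}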
+ +package leveldb + +import ( + "bytes" + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" +) + +type tbRec struct { + t vType + key, value []byte +} + +type testBatch struct { + rec []*tbRec +} + +func (p *testBatch) put(key, value []byte, seq uint64) { + p.rec = append(p.rec, &tbRec{tVal, key, value}) +} + +func (p *testBatch) delete(key []byte, seq uint64) { + p.rec = append(p.rec, &tbRec{tDel, key, nil}) +} + +func compareBatch(t *testing.T, b1, b2 *Batch) { + if b1.seq != b2.seq { + t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq) + } + if b1.len() != b2.len() { + t.Fatalf("invalid record length want %d, got %d", b1.len(), b2.len()) + } + p1, p2 := new(testBatch), new(testBatch) + err := b1.replay(p1) + if err != nil { + t.Fatal("error when replaying batch 1: ", err) + } + err = b2.replay(p2) + if err != nil { + t.Fatal("error when replaying batch 2: ", err) + } + for i := range p1.rec { + r1, r2 := p1.rec[i], p2.rec[i] + if r1.t != r2.t { + t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.t, r2.t) + } + if !bytes.Equal(r1.key, r2.key) { + t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key)) + } + if r1.t == tVal { + if !bytes.Equal(r1.value, r2.value) { + t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value)) + } + } + } +} + +func TestBatch_EncodeDecode(t *testing.T) { + b1 := new(Batch) + b1.seq = 10009 + b1.Put([]byte("key1"), []byte("value1")) + b1.Put([]byte("key2"), []byte("value2")) + b1.Delete([]byte("key1")) + b1.Put([]byte("k"), []byte("")) + b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz")) + b1.Delete([]byte("key10000")) + b1.Delete([]byte("k")) + buf := b1.encode() + b2 := new(Batch) + err := b2.decode(buf) + if err != nil { + t.Error("error when decoding batch: ", err) + } + compareBatch(t, b1, b2) +} + +func TestBatch_Append(t *testing.T) { + b1 := new(Batch) + b1.seq = 10009 + b1.Put([]byte("key1"), []byte("value1")) + b1.Put([]byte("key2"), []byte("value2")) + b1.Delete([]byte("key1")) + b1.Put([]byte("foo"), []byte("foovalue")) + b1.Put([]byte("bar"), []byte("barvalue")) + b2a := new(Batch) + b2a.seq = 10009 + b2a.Put([]byte("key1"), []byte("value1")) + b2a.Put([]byte("key2"), []byte("value2")) + b2a.Delete([]byte("key1")) + b2b := new(Batch) + b2b.Put([]byte("foo"), []byte("foovalue")) + b2b.Put([]byte("bar"), []byte("barvalue")) + b2a.append(b2b) + compareBatch(t, b1, b2a) +} + +func TestBatch_Size(t *testing.T) { + b := new(Batch) + for i := 0; i < 2; i++ { + b.Put([]byte("key1"), []byte("value1")) + b.Put([]byte("key2"), []byte("value2")) + b.Delete([]byte("key1")) + b.Put([]byte("foo"), []byte("foovalue")) + b.Put([]byte("bar"), []byte("barvalue")) + mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0) + b.memReplay(mem) + if b.size() != mem.Size() { + t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size()) + } + b.Reset() + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go new file mode 100644 index 000000000..7ad53eb7e --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go @@ -0,0 +1,464 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "bytes" + "fmt" + "math/rand" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" +) + +func randomString(r *rand.Rand, n int) []byte { + b := new(bytes.Buffer) + for i := 0; i < n; i++ { + b.WriteByte(' ' + byte(r.Intn(95))) + } + return b.Bytes() +} + +func compressibleStr(r *rand.Rand, frac float32, n int) []byte { + nn := int(float32(n) * frac) + rb := randomString(r, nn) + b := make([]byte, 0, n+nn) + for len(b) < n { + b = append(b, rb...) + } + return b[:n] +} + +type valueGen struct { + src []byte + pos int +} + +func newValueGen(frac float32) *valueGen { + v := new(valueGen) + r := rand.New(rand.NewSource(301)) + v.src = make([]byte, 0, 1048576+100) + for len(v.src) < 1048576 { + v.src = append(v.src, compressibleStr(r, frac, 100)...) + } + return v +} + +func (v *valueGen) get(n int) []byte { + if v.pos+n > len(v.src) { + v.pos = 0 + } + v.pos += n + return v.src[v.pos-n : v.pos] +} + +var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid())) + +type dbBench struct { + b *testing.B + stor storage.Storage + db *DB + + o *opt.Options + ro *opt.ReadOptions + wo *opt.WriteOptions + + keys, values [][]byte +} + +func openDBBench(b *testing.B, noCompress bool) *dbBench { + _, err := os.Stat(benchDB) + if err == nil { + err = os.RemoveAll(benchDB) + if err != nil { + b.Fatal("cannot remove old db: ", err) + } + } + + p := &dbBench{ + b: b, + o: &opt.Options{}, + ro: &opt.ReadOptions{}, + wo: &opt.WriteOptions{}, + } + p.stor, err = storage.OpenFile(benchDB) + if err != nil { + b.Fatal("cannot open stor: ", err) + } + if noCompress { + p.o.Compression = opt.NoCompression + } + + p.db, err = Open(p.stor, p.o) + if err != nil { + b.Fatal("cannot open db: ", err) + } + + runtime.GOMAXPROCS(runtime.NumCPU()) + return p +} + +func (p *dbBench) reopen() { + p.db.Close() + var err error + p.db, err = Open(p.stor, p.o) + if err != nil { + p.b.Fatal("Reopen: got error: ", err) + } +} + +func (p *dbBench) populate(n int) { + p.keys, p.values = make([][]byte, n), make([][]byte, n) + v := newValueGen(0.5) + for i := range p.keys { + p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100) + } +} + +func (p *dbBench) randomize() { + m := len(p.keys) + times := m * 2 + r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface)) + for n := 0; n < times; n++ { + i, j := r1.Int()%m, r2.Int()%m + if i == j { + continue + } + p.keys[i], p.keys[j] = p.keys[j], p.keys[i] + p.values[i], p.values[j] = p.values[j], p.values[i] + } +} + +func (p *dbBench) writes(perBatch int) { + b := p.b + db := p.db + + n := len(p.keys) + m := n / perBatch + if n%perBatch > 0 { + m++ + } + batches := make([]Batch, m) + j := 0 + for i := range batches { + first := true + for ; j < n && ((j+1)%perBatch != 0 || first); j++ { + first = false + batches[i].Put(p.keys[j], p.values[j]) + } + } + runtime.GC() + + b.ResetTimer() + b.StartTimer() + for i := range batches { + err := db.Write(&(batches[i]), p.wo) + if err != nil { + b.Fatal("write failed: ", err) + } + } + b.StopTimer() + b.SetBytes(116) +} + +func (p *dbBench) gc() { + p.keys, p.values 
= nil, nil + runtime.GC() +} + +func (p *dbBench) puts() { + b := p.b + db := p.db + + b.ResetTimer() + b.StartTimer() + for i := range p.keys { + err := db.Put(p.keys[i], p.values[i], p.wo) + if err != nil { + b.Fatal("put failed: ", err) + } + } + b.StopTimer() + b.SetBytes(116) +} + +func (p *dbBench) fill() { + b := p.b + db := p.db + + perBatch := 10000 + batch := new(Batch) + for i, n := 0, len(p.keys); i < n; { + first := true + for ; i < n && ((i+1)%perBatch != 0 || first); i++ { + first = false + batch.Put(p.keys[i], p.values[i]) + } + err := db.Write(batch, p.wo) + if err != nil { + b.Fatal("write failed: ", err) + } + batch.Reset() + } +} + +func (p *dbBench) gets() { + b := p.b + db := p.db + + b.ResetTimer() + for i := range p.keys { + _, err := db.Get(p.keys[i], p.ro) + if err != nil { + b.Error("got error: ", err) + } + } + b.StopTimer() +} + +func (p *dbBench) seeks() { + b := p.b + + iter := p.newIter() + defer iter.Release() + b.ResetTimer() + for i := range p.keys { + if !iter.Seek(p.keys[i]) { + b.Error("value not found for: ", string(p.keys[i])) + } + } + b.StopTimer() +} + +func (p *dbBench) newIter() iterator.Iterator { + iter := p.db.NewIterator(nil, p.ro) + err := iter.Error() + if err != nil { + p.b.Fatal("cannot create iterator: ", err) + } + return iter +} + +func (p *dbBench) close() { + if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil { + p.b.Log("Block pool stats: ", bp) + } + p.db.Close() + p.stor.Close() + os.RemoveAll(benchDB) + p.db = nil + p.keys = nil + p.values = nil + runtime.GC() + runtime.GOMAXPROCS(1) +} + +func BenchmarkDBWrite(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.writes(1) + p.close() +} + +func BenchmarkDBWriteBatch(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.writes(1000) + p.close() +} + +func BenchmarkDBWriteUncompressed(b *testing.B) { + p := openDBBench(b, true) + p.populate(b.N) + p.writes(1) + p.close() +} + +func BenchmarkDBWriteBatchUncompressed(b *testing.B) { + p := openDBBench(b, true) + p.populate(b.N) + p.writes(1000) + p.close() +} + +func BenchmarkDBWriteRandom(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.randomize() + p.writes(1) + p.close() +} + +func BenchmarkDBWriteRandomSync(b *testing.B) { + p := openDBBench(b, false) + p.wo.Sync = true + p.populate(b.N) + p.writes(1) + p.close() +} + +func BenchmarkDBOverwrite(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.writes(1) + p.writes(1) + p.close() +} + +func BenchmarkDBOverwriteRandom(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.writes(1) + p.randomize() + p.writes(1) + p.close() +} + +func BenchmarkDBPut(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.puts() + p.close() +} + +func BenchmarkDBRead(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gc() + + iter := p.newIter() + b.ResetTimer() + for iter.Next() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBReadGC(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + + iter := p.newIter() + b.ResetTimer() + for iter.Next() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBReadUncompressed(b *testing.B) { + p := openDBBench(b, true) + p.populate(b.N) + p.fill() + p.gc() + + iter := p.newIter() + b.ResetTimer() + for iter.Next() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBReadTable(b *testing.B) { + p := 
openDBBench(b, false) + p.populate(b.N) + p.fill() + p.reopen() + p.gc() + + iter := p.newIter() + b.ResetTimer() + for iter.Next() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBReadReverse(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gc() + + iter := p.newIter() + b.ResetTimer() + iter.Last() + for iter.Prev() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBReadReverseTable(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.reopen() + p.gc() + + iter := p.newIter() + b.ResetTimer() + iter.Last() + for iter.Prev() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBSeek(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.seeks() + p.close() +} + +func BenchmarkDBSeekRandom(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.randomize() + p.seeks() + p.close() +} + +func BenchmarkDBGet(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gets() + p.close() +} + +func BenchmarkDBGetRandom(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.randomize() + p.gets() + p.close() +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go new file mode 100644 index 000000000..49f82f0fb --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go @@ -0,0 +1,158 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package cache provides interface and implementation of a cache algorithms. +package cache + +import ( + "sync/atomic" +) + +// SetFunc is the function that will be called by Namespace.Get to create +// a cache object, if charge is less than one than the cache object will +// not be registered to cache tree, if value is nil then the cache object +// will not be created. +type SetFunc func() (charge int, value interface{}) + +// DelFin is the function that will be called as the result of a delete operation. +// Exist == true is indication that the object is exist, and pending == true is +// indication of deletion already happen but haven't done yet (wait for all handles +// to be released). And exist == false means the object doesn't exist. +type DelFin func(exist, pending bool) + +// PurgeFin is the function that will be called as the result of a purge operation. +type PurgeFin func(ns, key uint64) + +// Cache is a cache tree. A cache instance must be goroutine-safe. +type Cache interface { + // SetCapacity sets cache tree capacity. + SetCapacity(capacity int) + + // Capacity returns cache tree capacity. + Capacity() int + + // Used returns used cache tree capacity. + Used() int + + // Size returns entire alive cache objects size. + Size() int + + // NumObjects returns number of alive objects. + NumObjects() int + + // GetNamespace gets cache namespace with the given id. + // GetNamespace is never return nil. + GetNamespace(id uint64) Namespace + + // PurgeNamespace purges cache namespace with the given id from this cache tree. + // Also read Namespace.Purge. + PurgeNamespace(id uint64, fin PurgeFin) + + // ZapNamespace detaches cache namespace with the given id from this cache tree. + // Also read Namespace.Zap. 
+ ZapNamespace(id uint64) + + // Purge purges all cache namespace from this cache tree. + // This is behave the same as calling Namespace.Purge method on all cache namespace. + Purge(fin PurgeFin) + + // Zap detaches all cache namespace from this cache tree. + // This is behave the same as calling Namespace.Zap method on all cache namespace. + Zap() +} + +// Namespace is a cache namespace. A namespace instance must be goroutine-safe. +type Namespace interface { + // Get gets cache object with the given key. + // If cache object is not found and setf is not nil, Get will atomically creates + // the cache object by calling setf. Otherwise Get will returns nil. + // + // The returned cache handle should be released after use by calling Release + // method. + Get(key uint64, setf SetFunc) Handle + + // Delete removes cache object with the given key from cache tree. + // A deleted cache object will be released as soon as all of its handles have + // been released. + // Delete only happen once, subsequent delete will consider cache object doesn't + // exist, even if the cache object ins't released yet. + // + // If not nil, fin will be called if the cache object doesn't exist or when + // finally be released. + // + // Delete returns true if such cache object exist and never been deleted. + Delete(key uint64, fin DelFin) bool + + // Purge removes all cache objects within this namespace from cache tree. + // This is the same as doing delete on all cache objects. + // + // If not nil, fin will be called on all cache objects when its finally be + // released. + Purge(fin PurgeFin) + + // Zap detaches namespace from cache tree and release all its cache objects. + // A zapped namespace can never be filled again. + // Calling Get on zapped namespace will always return nil. + Zap() +} + +// Handle is a cache handle. +type Handle interface { + // Release releases this cache handle. This method can be safely called mutiple + // times. + Release() + + // Value returns value of this cache handle. + // Value will returns nil after this cache handle have be released. + Value() interface{} +} + +const ( + DelNotExist = iota + DelExist + DelPendig +) + +// Namespace state. +type nsState int + +const ( + nsEffective nsState = iota + nsZapped +) + +// Node state. +type nodeState int + +const ( + nodeEffective nodeState = iota + nodeEvicted + nodeDeleted +) + +// Fake handle. +type fakeHandle struct { + value interface{} + fin func() + once uint32 +} + +func (h *fakeHandle) Value() interface{} { + if atomic.LoadUint32(&h.once) == 0 { + return h.value + } + return nil +} + +func (h *fakeHandle) Release() { + if !atomic.CompareAndSwapUint32(&h.once, 0, 1) { + return + } + if h.fin != nil { + h.fin() + h.fin = nil + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go new file mode 100644 index 000000000..6735e02ef --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go @@ -0,0 +1,547 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
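The Cache, Namespace and Handle interfaces defined in cache.go above are easiest to read alongside a usage sketch. The snippet below is an assumed example rather than part of the vendored tests; it relies on NewLRUCache from lru_cache.go, and the namespace id, key, charge and value are arbitrary.

package main

import (
	"fmt"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewLRUCache(100) // capacity, in charge units
	ns := c.GetNamespace(1)     // namespaces are created on demand

	// On a miss, Get calls the SetFunc to create the object; the returned
	// handle must be released after use.
	h := ns.Get(42, func() (int, interface{}) { return 1, "hello" })
	fmt.Println("created:", h.Value())
	h.Release()

	// With a nil SetFunc, Get only returns existing objects.
	if h := ns.Get(42, nil); h != nil {
		fmt.Println("hit:", h.Value())
		h.Release()
	}

	// Delete marks the object for removal; the DelFin callback reports
	// whether it existed and whether deletion is still pending on handles.
	ns.Delete(42, func(exist, pending bool) {
		fmt.Println("delete fin, exist:", exist, "pending:", pending)
	})
}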
+ +package cache + +import ( + "fmt" + "math/rand" + "runtime" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +type releaserFunc struct { + fn func() + value interface{} +} + +func (r releaserFunc) Release() { + if r.fn != nil { + r.fn() + } +} + +func set(ns Namespace, key uint64, value interface{}, charge int, relf func()) Handle { + return ns.Get(key, func() (int, interface{}) { + if relf != nil { + return charge, releaserFunc{relf, value} + } else { + return charge, value + } + }) +} + +func TestCache_HitMiss(t *testing.T) { + cases := []struct { + key uint64 + value string + }{ + {1, "vvvvvvvvv"}, + {100, "v1"}, + {0, "v2"}, + {12346, "v3"}, + {777, "v4"}, + {999, "v5"}, + {7654, "v6"}, + {2, "v7"}, + {3, "v8"}, + {9, "v9"}, + } + + setfin := 0 + c := NewLRUCache(1000) + ns := c.GetNamespace(0) + for i, x := range cases { + set(ns, x.key, x.value, len(x.value), func() { + setfin++ + }).Release() + for j, y := range cases { + h := ns.Get(y.key, nil) + if j <= i { + // should hit + if h == nil { + t.Errorf("case '%d' iteration '%d' is miss", i, j) + } else { + if x := h.Value().(releaserFunc).value.(string); x != y.value { + t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) + } + } + } else { + // should miss + if h != nil { + t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string)) + } + } + if h != nil { + h.Release() + } + } + } + + for i, x := range cases { + finalizerOk := false + ns.Delete(x.key, func(exist, pending bool) { + finalizerOk = true + }) + + if !finalizerOk { + t.Errorf("case %d delete finalizer not executed", i) + } + + for j, y := range cases { + h := ns.Get(y.key, nil) + if j > i { + // should hit + if h == nil { + t.Errorf("case '%d' iteration '%d' is miss", i, j) + } else { + if x := h.Value().(releaserFunc).value.(string); x != y.value { + t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) + } + } + } else { + // should miss + if h != nil { + t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string)) + } + } + if h != nil { + h.Release() + } + } + } + + if setfin != len(cases) { + t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin) + } +} + +func TestLRUCache_Eviction(t *testing.T) { + c := NewLRUCache(12) + ns := c.GetNamespace(0) + o1 := set(ns, 1, 1, 1, nil) + set(ns, 2, 2, 1, nil).Release() + set(ns, 3, 3, 1, nil).Release() + set(ns, 4, 4, 1, nil).Release() + set(ns, 5, 5, 1, nil).Release() + if h := ns.Get(2, nil); h != nil { // 1,3,4,5,2 + h.Release() + } + set(ns, 9, 9, 10, nil).Release() // 5,2,9 + + for _, key := range []uint64{9, 2, 5, 1} { + h := ns.Get(key, nil) + if h == nil { + t.Errorf("miss for key '%d'", key) + } else { + if x := h.Value().(int); x != int(key) { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) + } + h.Release() + } + } + o1.Release() + for _, key := range []uint64{1, 2, 5} { + h := ns.Get(key, nil) + if h == nil { + t.Errorf("miss for key '%d'", key) + } else { + if x := h.Value().(int); x != int(key) { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) + } + h.Release() + } + } + for _, key := range []uint64{3, 4, 9} { + h := ns.Get(key, nil) + if h != nil { + t.Errorf("hit for key '%d'", key) + if x := h.Value().(int); x != int(key) { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) + } + h.Release() + } + } +} + +func 
TestLRUCache_SetGet(t *testing.T) { + c := NewLRUCache(13) + ns := c.GetNamespace(0) + for i := 0; i < 200; i++ { + n := uint64(rand.Intn(99999) % 20) + set(ns, n, n, 1, nil).Release() + if h := ns.Get(n, nil); h != nil { + if h.Value() == nil { + t.Errorf("key '%d' contains nil value", n) + } else { + if x := h.Value().(uint64); x != n { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, x) + } + } + h.Release() + } else { + t.Errorf("key '%d' doesn't exist", n) + } + } +} + +func TestLRUCache_Purge(t *testing.T) { + c := NewLRUCache(3) + ns1 := c.GetNamespace(0) + o1 := set(ns1, 1, 1, 1, nil) + o2 := set(ns1, 2, 2, 1, nil) + ns1.Purge(nil) + set(ns1, 3, 3, 1, nil).Release() + for _, key := range []uint64{1, 2, 3} { + h := ns1.Get(key, nil) + if h == nil { + t.Errorf("miss for key '%d'", key) + } else { + if x := h.Value().(int); x != int(key) { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) + } + h.Release() + } + } + o1.Release() + o2.Release() + for _, key := range []uint64{1, 2} { + h := ns1.Get(key, nil) + if h != nil { + t.Errorf("hit for key '%d'", key) + if x := h.Value().(int); x != int(key) { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) + } + h.Release() + } + } +} + +type testingCacheObjectCounter struct { + created uint32 + released uint32 +} + +func (c *testingCacheObjectCounter) createOne() { + atomic.AddUint32(&c.created, 1) +} + +func (c *testingCacheObjectCounter) releaseOne() { + atomic.AddUint32(&c.released, 1) +} + +type testingCacheObject struct { + t *testing.T + cnt *testingCacheObjectCounter + + ns, key uint64 + + releaseCalled uint32 +} + +func (x *testingCacheObject) Release() { + if atomic.CompareAndSwapUint32(&x.releaseCalled, 0, 1) { + x.cnt.releaseOne() + } else { + x.t.Errorf("duplicate setfin NS#%d KEY#%s", x.ns, x.key) + } +} + +func TestLRUCache_Finalizer(t *testing.T) { + const ( + capacity = 100 + goroutines = 100 + iterations = 10000 + keymax = 8000 + ) + + runtime.GOMAXPROCS(runtime.NumCPU()) + defer runtime.GOMAXPROCS(1) + + wg := &sync.WaitGroup{} + cnt := &testingCacheObjectCounter{} + + c := NewLRUCache(capacity) + + type instance struct { + seed int64 + rnd *rand.Rand + ns uint64 + effective int32 + handles []Handle + handlesMap map[uint64]int + + delete bool + purge bool + zap bool + wantDel int32 + delfinCalledAll int32 + delfinCalledEff int32 + purgefinCalled int32 + } + + instanceGet := func(p *instance, ns Namespace, key uint64) { + h := ns.Get(key, func() (charge int, value interface{}) { + to := &testingCacheObject{ + t: t, cnt: cnt, + ns: p.ns, + key: key, + } + atomic.AddInt32(&p.effective, 1) + cnt.createOne() + return 1, releaserFunc{func() { + to.Release() + atomic.AddInt32(&p.effective, -1) + }, to} + }) + p.handles = append(p.handles, h) + p.handlesMap[key] = p.handlesMap[key] + 1 + } + instanceRelease := func(p *instance, ns Namespace, i int) { + h := p.handles[i] + key := h.Value().(releaserFunc).value.(*testingCacheObject).key + if n := p.handlesMap[key]; n == 0 { + t.Fatal("key ref == 0") + } else if n > 1 { + p.handlesMap[key] = n - 1 + } else { + delete(p.handlesMap, key) + } + h.Release() + p.handles = append(p.handles[:i], p.handles[i+1:]...) 
+ p.handles[len(p.handles) : len(p.handles)+1][0] = nil + } + + seeds := make([]int64, goroutines) + instances := make([]instance, goroutines) + for i := range instances { + p := &instances[i] + p.handlesMap = make(map[uint64]int) + if seeds[i] == 0 { + seeds[i] = time.Now().UnixNano() + } + p.seed = seeds[i] + p.rnd = rand.New(rand.NewSource(p.seed)) + p.ns = uint64(i) + p.delete = i%6 == 0 + p.purge = i%8 == 0 + p.zap = i%12 == 0 || i%3 == 0 + } + + seedsStr := make([]string, len(seeds)) + for i, seed := range seeds { + seedsStr[i] = fmt.Sprint(seed) + } + t.Logf("seeds := []int64{%s}", strings.Join(seedsStr, ", ")) + + // Get and release. + for i := range instances { + p := &instances[i] + + wg.Add(1) + go func(p *instance) { + defer wg.Done() + + ns := c.GetNamespace(p.ns) + for i := 0; i < iterations; i++ { + if len(p.handles) == 0 || p.rnd.Int()%2 == 0 { + instanceGet(p, ns, uint64(p.rnd.Intn(keymax))) + } else { + instanceRelease(p, ns, p.rnd.Intn(len(p.handles))) + } + } + }(p) + } + wg.Wait() + + if used, cap := c.Used(), c.Capacity(); used > cap { + t.Errorf("Used > capacity, used=%d cap=%d", used, cap) + } + + // Check effective objects. + for i := range instances { + p := &instances[i] + if int(p.effective) < len(p.handlesMap) { + t.Errorf("#%d effective objects < acquired handle, eo=%d ah=%d", i, p.effective, len(p.handlesMap)) + } + } + + if want := int(cnt.created - cnt.released); c.Size() != want { + t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size()) + } + + // Delete and purge. + for i := range instances { + p := &instances[i] + p.wantDel = p.effective + + wg.Add(1) + go func(p *instance) { + defer wg.Done() + + ns := c.GetNamespace(p.ns) + + if p.delete { + for key := uint64(0); key < keymax; key++ { + _, wantExist := p.handlesMap[key] + gotExist := ns.Delete(key, func(exist, pending bool) { + atomic.AddInt32(&p.delfinCalledAll, 1) + if exist { + atomic.AddInt32(&p.delfinCalledEff, 1) + } + }) + if !gotExist && wantExist { + t.Errorf("delete on NS#%d KEY#%d not found", p.ns, key) + } + } + + var delfinCalled int + for key := uint64(0); key < keymax; key++ { + func(key uint64) { + gotExist := ns.Delete(key, func(exist, pending bool) { + if exist && !pending { + t.Errorf("delete fin on NS#%d KEY#%d exist and not pending for deletion", p.ns, key) + } + delfinCalled++ + }) + if gotExist { + t.Errorf("delete on NS#%d KEY#%d found", p.ns, key) + } + }(key) + } + if delfinCalled != keymax { + t.Errorf("(2) #%d not all delete fin called, diff=%d", p.ns, keymax-delfinCalled) + } + } + + if p.purge { + ns.Purge(func(ns, key uint64) { + atomic.AddInt32(&p.purgefinCalled, 1) + }) + } + }(p) + } + wg.Wait() + + if want := int(cnt.created - cnt.released); c.Size() != want { + t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size()) + } + + // Release. + for i := range instances { + p := &instances[i] + + if !p.zap { + wg.Add(1) + go func(p *instance) { + defer wg.Done() + + ns := c.GetNamespace(p.ns) + for i := len(p.handles) - 1; i >= 0; i-- { + instanceRelease(p, ns, i) + } + }(p) + } + } + wg.Wait() + + if want := int(cnt.created - cnt.released); c.Size() != want { + t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size()) + } + + // Zap. 
+ for i := range instances { + p := &instances[i] + + if p.zap { + wg.Add(1) + go func(p *instance) { + defer wg.Done() + + ns := c.GetNamespace(p.ns) + ns.Zap() + + p.handles = nil + p.handlesMap = nil + }(p) + } + } + wg.Wait() + + if want := int(cnt.created - cnt.released); c.Size() != want { + t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size()) + } + + if notrel, used := int(cnt.created-cnt.released), c.Used(); notrel != used { + t.Errorf("Invalid used value, want=%d got=%d", notrel, used) + } + + c.Purge(nil) + + for i := range instances { + p := &instances[i] + + if p.delete { + if p.delfinCalledAll != keymax { + t.Errorf("#%d not all delete fin called, purge=%v zap=%v diff=%d", p.ns, p.purge, p.zap, keymax-p.delfinCalledAll) + } + if p.delfinCalledEff != p.wantDel { + t.Errorf("#%d not all effective delete fin called, diff=%d", p.ns, p.wantDel-p.delfinCalledEff) + } + if p.purge && p.purgefinCalled > 0 { + t.Errorf("#%d some purge fin called, delete=%v zap=%v n=%d", p.ns, p.delete, p.zap, p.purgefinCalled) + } + } else { + if p.purge { + if p.purgefinCalled != p.wantDel { + t.Errorf("#%d not all purge fin called, delete=%v zap=%v diff=%d", p.ns, p.delete, p.zap, p.wantDel-p.purgefinCalled) + } + } + } + } + + if cnt.created != cnt.released { + t.Errorf("Some cache object weren't released, created=%d released=%d", cnt.created, cnt.released) + } +} + +func BenchmarkLRUCache_SetRelease(b *testing.B) { + capacity := b.N / 100 + if capacity <= 0 { + capacity = 10 + } + c := NewLRUCache(capacity) + ns := c.GetNamespace(0) + b.ResetTimer() + for i := uint64(0); i < uint64(b.N); i++ { + set(ns, i, nil, 1, nil).Release() + } +} + +func BenchmarkLRUCache_SetReleaseTwice(b *testing.B) { + capacity := b.N / 100 + if capacity <= 0 { + capacity = 10 + } + c := NewLRUCache(capacity) + ns := c.GetNamespace(0) + b.ResetTimer() + + na := b.N / 2 + nb := b.N - na + + for i := uint64(0); i < uint64(na); i++ { + set(ns, i, nil, 1, nil).Release() + } + + for i := uint64(0); i < uint64(nb); i++ { + set(ns, i, nil, 1, nil).Release() + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go new file mode 100644 index 000000000..d99477b01 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go @@ -0,0 +1,382 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package cache + +import ( + "sync" + "sync/atomic" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// lruCache represent a LRU cache state. +type lruCache struct { + mu sync.Mutex + recent lruNode + table map[uint64]*lruNs + capacity int + used, size, alive int +} + +// NewLRUCache creates a new initialized LRU cache with the given capacity. 
+func NewLRUCache(capacity int) Cache { + c := &lruCache{ + table: make(map[uint64]*lruNs), + capacity: capacity, + } + c.recent.rNext = &c.recent + c.recent.rPrev = &c.recent + return c +} + +func (c *lruCache) Capacity() int { + c.mu.Lock() + defer c.mu.Unlock() + return c.capacity +} + +func (c *lruCache) Used() int { + c.mu.Lock() + defer c.mu.Unlock() + return c.used +} + +func (c *lruCache) Size() int { + c.mu.Lock() + defer c.mu.Unlock() + return c.size +} + +func (c *lruCache) NumObjects() int { + c.mu.Lock() + defer c.mu.Unlock() + return c.alive +} + +// SetCapacity set cache capacity. +func (c *lruCache) SetCapacity(capacity int) { + c.mu.Lock() + c.capacity = capacity + c.evict() + c.mu.Unlock() +} + +// GetNamespace return namespace object for given id. +func (c *lruCache) GetNamespace(id uint64) Namespace { + c.mu.Lock() + defer c.mu.Unlock() + + if ns, ok := c.table[id]; ok { + return ns + } + + ns := &lruNs{ + lru: c, + id: id, + table: make(map[uint64]*lruNode), + } + c.table[id] = ns + return ns +} + +func (c *lruCache) ZapNamespace(id uint64) { + c.mu.Lock() + if ns, exist := c.table[id]; exist { + ns.zapNB() + delete(c.table, id) + } + c.mu.Unlock() +} + +func (c *lruCache) PurgeNamespace(id uint64, fin PurgeFin) { + c.mu.Lock() + if ns, exist := c.table[id]; exist { + ns.purgeNB(fin) + } + c.mu.Unlock() +} + +// Purge purge entire cache. +func (c *lruCache) Purge(fin PurgeFin) { + c.mu.Lock() + for _, ns := range c.table { + ns.purgeNB(fin) + } + c.mu.Unlock() +} + +func (c *lruCache) Zap() { + c.mu.Lock() + for _, ns := range c.table { + ns.zapNB() + } + c.table = make(map[uint64]*lruNs) + c.mu.Unlock() +} + +func (c *lruCache) evict() { + top := &c.recent + for n := c.recent.rPrev; c.used > c.capacity && n != top; { + n.state = nodeEvicted + n.rRemove() + n.derefNB() + c.used -= n.charge + n = c.recent.rPrev + } +} + +type lruNs struct { + lru *lruCache + id uint64 + table map[uint64]*lruNode + state nsState +} + +func (ns *lruNs) Get(key uint64, setf SetFunc) Handle { + ns.lru.mu.Lock() + + if ns.state != nsEffective { + ns.lru.mu.Unlock() + return nil + } + + node, ok := ns.table[key] + if ok { + switch node.state { + case nodeEvicted: + // Insert to recent list. + node.state = nodeEffective + node.ref++ + ns.lru.used += node.charge + ns.lru.evict() + fallthrough + case nodeEffective: + // Bump to front. 
+ node.rRemove() + node.rInsert(&ns.lru.recent) + } + node.ref++ + } else { + if setf == nil { + ns.lru.mu.Unlock() + return nil + } + + charge, value := setf() + if value == nil { + ns.lru.mu.Unlock() + return nil + } + + node = &lruNode{ + ns: ns, + key: key, + value: value, + charge: charge, + ref: 1, + } + ns.table[key] = node + + ns.lru.size += charge + ns.lru.alive++ + if charge > 0 { + node.ref++ + node.rInsert(&ns.lru.recent) + ns.lru.used += charge + ns.lru.evict() + } + } + + ns.lru.mu.Unlock() + return &lruHandle{node: node} +} + +func (ns *lruNs) Delete(key uint64, fin DelFin) bool { + ns.lru.mu.Lock() + + if ns.state != nsEffective { + if fin != nil { + fin(false, false) + } + ns.lru.mu.Unlock() + return false + } + + node, exist := ns.table[key] + if !exist { + if fin != nil { + fin(false, false) + } + ns.lru.mu.Unlock() + return false + } + + switch node.state { + case nodeDeleted: + if fin != nil { + fin(true, true) + } + ns.lru.mu.Unlock() + return false + case nodeEffective: + ns.lru.used -= node.charge + node.state = nodeDeleted + node.delfin = fin + node.rRemove() + node.derefNB() + default: + node.state = nodeDeleted + node.delfin = fin + } + + ns.lru.mu.Unlock() + return true +} + +func (ns *lruNs) purgeNB(fin PurgeFin) { + if ns.state != nsEffective { + return + } + + for _, node := range ns.table { + switch node.state { + case nodeDeleted: + case nodeEffective: + ns.lru.used -= node.charge + node.state = nodeDeleted + node.purgefin = fin + node.rRemove() + node.derefNB() + default: + node.state = nodeDeleted + node.purgefin = fin + } + } +} + +func (ns *lruNs) Purge(fin PurgeFin) { + ns.lru.mu.Lock() + ns.purgeNB(fin) + ns.lru.mu.Unlock() +} + +func (ns *lruNs) zapNB() { + if ns.state != nsEffective { + return + } + + ns.state = nsZapped + + for _, node := range ns.table { + if node.state == nodeEffective { + ns.lru.used -= node.charge + node.rRemove() + } + ns.lru.size -= node.charge + node.state = nodeDeleted + node.fin() + } + ns.table = nil +} + +func (ns *lruNs) Zap() { + ns.lru.mu.Lock() + ns.zapNB() + delete(ns.lru.table, ns.id) + ns.lru.mu.Unlock() +} + +type lruNode struct { + ns *lruNs + + rNext, rPrev *lruNode + + key uint64 + value interface{} + charge int + ref int + state nodeState + delfin DelFin + purgefin PurgeFin +} + +func (n *lruNode) rInsert(at *lruNode) { + x := at.rNext + at.rNext = n + n.rPrev = at + n.rNext = x + x.rPrev = n +} + +func (n *lruNode) rRemove() bool { + if n.rPrev == nil { + return false + } + + n.rPrev.rNext = n.rNext + n.rNext.rPrev = n.rPrev + n.rPrev = nil + n.rNext = nil + + return true +} + +func (n *lruNode) fin() { + if r, ok := n.value.(util.Releaser); ok { + r.Release() + } + if n.purgefin != nil { + n.purgefin(n.ns.id, n.key) + n.delfin = nil + n.purgefin = nil + } else if n.delfin != nil { + n.delfin(true, false) + n.delfin = nil + } +} + +func (n *lruNode) derefNB() { + n.ref-- + if n.ref == 0 { + if n.ns.state == nsEffective { + // Remove elemement. 
+ delete(n.ns.table, n.key) + n.ns.lru.size -= n.charge + n.ns.lru.alive-- + n.fin() + } + n.value = nil + } else if n.ref < 0 { + panic("leveldb/cache: lruCache: negative node reference") + } +} + +func (n *lruNode) deref() { + n.ns.lru.mu.Lock() + n.derefNB() + n.ns.lru.mu.Unlock() +} + +type lruHandle struct { + node *lruNode + once uint32 +} + +func (h *lruHandle) Value() interface{} { + if atomic.LoadUint32(&h.once) == 0 { + return h.node.value + } + return nil +} + +func (h *lruHandle) Release() { + if !atomic.CompareAndSwapUint32(&h.once, 0, 1) { + return + } + h.node.deref() + h.node = nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go new file mode 100644 index 000000000..f4e9fb693 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go @@ -0,0 +1,75 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + +type iComparer struct { + ucmp comparer.Comparer +} + +func (icmp *iComparer) uName() string { + return icmp.ucmp.Name() +} + +func (icmp *iComparer) uCompare(a, b []byte) int { + return icmp.ucmp.Compare(a, b) +} + +func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte { + return icmp.ucmp.Separator(dst, a, b) +} + +func (icmp *iComparer) uSuccessor(dst, b []byte) []byte { + return icmp.ucmp.Successor(dst, b) +} + +func (icmp *iComparer) Name() string { + return icmp.uName() +} + +func (icmp *iComparer) Compare(a, b []byte) int { + x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey()) + if x == 0 { + if m, n := iKey(a).num(), iKey(b).num(); m > n { + x = -1 + } else if m < n { + x = 1 + } + } + return x +} + +func (icmp *iComparer) Separator(dst, a, b []byte) []byte { + ua, ub := iKey(a).ukey(), iKey(b).ukey() + dst = icmp.ucmp.Separator(dst, ua, ub) + if dst == nil { + return nil + } + if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 { + dst = append(dst, kMaxNumBytes...) + } else { + // Did not close possibilities that n maybe longer than len(ub). + dst = append(dst, a[len(a)-8:]...) + } + return dst +} + +func (icmp *iComparer) Successor(dst, b []byte) []byte { + ub := iKey(b).ukey() + dst = icmp.ucmp.Successor(dst, ub) + if dst == nil { + return nil + } + if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 { + dst = append(dst, kMaxNumBytes...) + } else { + // Did not close possibilities that n maybe longer than len(ub). + dst = append(dst, b[len(b)-8:]...) + } + return dst +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go new file mode 100644 index 000000000..14dddf88d --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go @@ -0,0 +1,51 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package comparer + +import "bytes" + +type bytesComparer struct{} + +func (bytesComparer) Compare(a, b []byte) int { + return bytes.Compare(a, b) +} + +func (bytesComparer) Name() string { + return "leveldb.BytewiseComparator" +} + +func (bytesComparer) Separator(dst, a, b []byte) []byte { + i, n := 0, len(a) + if n > len(b) { + n = len(b) + } + for ; i < n && a[i] == b[i]; i++ { + } + if i >= n { + // Do not shorten if one string is a prefix of the other + } else if c := a[i]; c < 0xff && c+1 < b[i] { + dst = append(dst, a[:i+1]...) + dst[i]++ + return dst + } + return nil +} + +func (bytesComparer) Successor(dst, b []byte) []byte { + for i, c := range b { + if c != 0xff { + dst = append(dst, b[:i+1]...) + dst[i]++ + return dst + } + } + return nil +} + +// DefaultComparer are default implementation of the Comparer interface. +// It uses the natural ordering, consistent with bytes.Compare. +var DefaultComparer = bytesComparer{} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go new file mode 100644 index 000000000..14a28f16f --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go @@ -0,0 +1,57 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package comparer provides interface and implementation for ordering +// sets of data. +package comparer + +// BasicComparer is the interface that wraps the basic Compare method. +type BasicComparer interface { + // Compare returns -1, 0, or +1 depending on whether a is 'less than', + // 'equal to' or 'greater than' b. The two arguments can only be 'equal' + // if their contents are exactly equal. Furthermore, the empty slice + // must be 'less than' any non-empty slice. + Compare(a, b []byte) int +} + +// Comparer defines a total ordering over the space of []byte keys: a 'less +// than' relationship. +type Comparer interface { + BasicComparer + + // Name returns name of the comparer. + // + // The Level-DB on-disk format stores the comparer name, and opening a + // database with a different comparer from the one it was created with + // will result in an error. + // + // An implementation to a new name whenever the comparer implementation + // changes in a way that will cause the relative ordering of any two keys + // to change. + // + // Names starting with "leveldb." are reserved and should not be used + // by any users of this package. + Name() string + + // Bellow are advanced functions used used to reduce the space requirements + // for internal data structures such as index blocks. + + // Separator appends a sequence of bytes x to dst such that a <= x && x < b, + // where 'less than' is consistent with Compare. An implementation should + // return nil if x equal to a. + // + // Either contents of a or b should not by any means modified. Doing so + // may cause corruption on the internal state. + Separator(dst, a, b []byte) []byte + + // Successor appends a sequence of bytes x to dst such that x >= b, where + // 'less than' is consistent with Compare. An implementation should return + // nil if x equal to b. + // + // Contents of b should not by any means modified. Doing so may cause + // corruption on the internal state. 
+ Successor(dst, b []byte) []byte +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go new file mode 100644 index 000000000..511058897 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go @@ -0,0 +1,40 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +const ( + kNumLevels = 7 + + // Level-0 compaction is started when we hit this many files. + kL0_CompactionTrigger float64 = 4 + + // Soft limit on number of level-0 files. We slow down writes at this point. + kL0_SlowdownWritesTrigger = 8 + + // Maximum number of level-0 files. We stop writes at this point. + kL0_StopWritesTrigger = 12 + + // Maximum level to which a new compacted memdb is pushed if it + // does not create overlap. We try to push to level 2 to avoid the + // relatively expensive level 0=>1 compactions and to avoid some + // expensive manifest file operations. We do not push all the way to + // the largest level since that can generate a lot of wasted disk + // space if the same key space is being repeatedly overwritten. + kMaxMemCompactLevel = 2 + + // Maximum size of a table. + kMaxTableSize = 2 * 1048576 + + // Maximum bytes of overlaps in grandparent (i.e., level+2) before we + // stop building a single file in a level->level+1 compaction. + kMaxGrandParentOverlapBytes = 10 * kMaxTableSize + + // Maximum number of bytes in all compacted files. We avoid expanding + // the lower level file set of a compaction if it would make the + // total compaction cover more than this many bytes. + kExpCompactionMaxBytes = 25 * kMaxTableSize +) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go new file mode 100644 index 000000000..1f45a5a6e --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go @@ -0,0 +1,472 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
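The Separator and Successor contracts described in the comparer package above are easier to see with concrete inputs. The following is an assumed usage sketch of comparer.DefaultComparer; the byte strings are arbitrary examples chosen to exercise the prefix-shortening behaviour of bytes_comparer.go.

package main

import (
	"fmt"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
)

func main() {
	cmp := comparer.DefaultComparer

	// Compare follows the natural bytes.Compare ordering.
	fmt.Println(cmp.Compare([]byte("abc"), []byte("abd"))) // -1

	// Separator returns a short key x with a <= x < b, used to shrink keys
	// stored in index blocks: the shared prefix is kept and the first
	// differing byte is bumped.
	fmt.Printf("%q\n", cmp.Separator(nil, []byte("abc1xyz"), []byte("abc3"))) // "abc2"

	// Successor returns a short key x >= b by bumping the first byte that
	// is not 0xff.
	fmt.Printf("%q\n", cmp.Successor(nil, []byte("abc"))) // "b"
}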
+ +package leveldb + +import ( + "bytes" + "fmt" + "io" + "math/rand" + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" +) + +const ctValSize = 1000 + +type dbCorruptHarness struct { + dbHarness +} + +func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness { + h := new(dbCorruptHarness) + h.init(t, o) + return h +} + +func newDbCorruptHarness(t *testing.T) *dbCorruptHarness { + return newDbCorruptHarnessWopt(t, &opt.Options{ + BlockCache: cache.NewLRUCache(100), + Strict: opt.StrictJournalChecksum, + }) +} + +func (h *dbCorruptHarness) recover() { + p := &h.dbHarness + t := p.t + + var err error + p.db, err = Recover(h.stor, h.o) + if err != nil { + t.Fatal("Repair: got error: ", err) + } +} + +func (h *dbCorruptHarness) build(n int) { + p := &h.dbHarness + t := p.t + db := p.db + + batch := new(Batch) + for i := 0; i < n; i++ { + batch.Reset() + batch.Put(tkey(i), tval(i, ctValSize)) + err := db.Write(batch, p.wo) + if err != nil { + t.Fatal("write error: ", err) + } + } +} + +func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) { + p := &h.dbHarness + t := p.t + db := p.db + + batch := new(Batch) + for i := range rnd.Perm(n) { + batch.Reset() + batch.Put(tkey(i), tval(i, ctValSize)) + err := db.Write(batch, p.wo) + if err != nil { + t.Fatal("write error: ", err) + } + } +} + +func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) { + p := &h.dbHarness + t := p.t + db := p.db + + batch := new(Batch) + for i := 0; i < n; i++ { + batch.Reset() + batch.Delete(tkey(rnd.Intn(max))) + err := db.Write(batch, p.wo) + if err != nil { + t.Fatal("write error: ", err) + } + } +} + +func (h *dbCorruptHarness) corrupt(ft storage.FileType, offset, n int) { + p := &h.dbHarness + t := p.t + + var file storage.File + ff, _ := p.stor.GetFiles(ft) + for _, f := range ff { + if file == nil || f.Num() > file.Num() { + file = f + } + } + if file == nil { + t.Fatalf("no such file with type %q", ft) + } + + r, err := file.Open() + if err != nil { + t.Fatal("cannot open file: ", err) + } + x, err := r.Seek(0, 2) + if err != nil { + t.Fatal("cannot query file size: ", err) + } + m := int(x) + if _, err := r.Seek(0, 0); err != nil { + t.Fatal(err) + } + + if offset < 0 { + if -offset > m { + offset = 0 + } else { + offset = m + offset + } + } + if offset > m { + offset = m + } + if offset+n > m { + n = m - offset + } + + buf := make([]byte, m) + _, err = io.ReadFull(r, buf) + if err != nil { + t.Fatal("cannot read file: ", err) + } + r.Close() + + for i := 0; i < n; i++ { + buf[offset+i] ^= 0x80 + } + + err = file.Remove() + if err != nil { + t.Fatal("cannot remove old file: ", err) + } + w, err := file.Create() + if err != nil { + t.Fatal("cannot create new file: ", err) + } + _, err = w.Write(buf) + if err != nil { + t.Fatal("cannot write new file: ", err) + } + w.Close() +} + +func (h *dbCorruptHarness) removeAll(ft storage.FileType) { + ff, err := h.stor.GetFiles(ft) + if err != nil { + h.t.Fatal("get files: ", err) + } + for _, f := range ff { + if err := f.Remove(); err != nil { + h.t.Error("remove file: ", err) + } + } +} + +func (h *dbCorruptHarness) removeOne(ft storage.FileType) { + ff, err := h.stor.GetFiles(ft) + if err != nil { + h.t.Fatal("get files: ", err) + } + f := ff[rand.Intn(len(ff))] + h.t.Logf("removing file 
@%d", f.Num()) + if err := f.Remove(); err != nil { + h.t.Error("remove file: ", err) + } +} + +func (h *dbCorruptHarness) check(min, max int) { + p := &h.dbHarness + t := p.t + db := p.db + + var n, badk, badv, missed, good int + iter := db.NewIterator(nil, p.ro) + for iter.Next() { + k := 0 + fmt.Sscanf(string(iter.Key()), "%d", &k) + if k < n { + badk++ + continue + } + missed += k - n + n = k + 1 + if !bytes.Equal(iter.Value(), tval(k, ctValSize)) { + badv++ + } else { + good++ + } + } + err := iter.Error() + iter.Release() + t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v", + min, max, good, badk, badv, missed, err) + if good < min || good > max { + t.Errorf("good entries number not in range") + } +} + +func TestCorruptDB_Journal(t *testing.T) { + h := newDbCorruptHarness(t) + + h.build(100) + h.check(100, 100) + h.closeDB() + h.corrupt(storage.TypeJournal, 19, 1) + h.corrupt(storage.TypeJournal, 32*1024+1000, 1) + + h.openDB() + h.check(36, 36) + + h.close() +} + +func TestCorruptDB_Table(t *testing.T) { + h := newDbCorruptHarness(t) + + h.build(100) + h.compactMem() + h.compactRangeAt(0, "", "") + h.compactRangeAt(1, "", "") + h.closeDB() + h.corrupt(storage.TypeTable, 100, 1) + + h.openDB() + h.check(99, 99) + + h.close() +} + +func TestCorruptDB_TableIndex(t *testing.T) { + h := newDbCorruptHarness(t) + + h.build(10000) + h.compactMem() + h.closeDB() + h.corrupt(storage.TypeTable, -2000, 500) + + h.openDB() + h.check(5000, 9999) + + h.close() +} + +func TestCorruptDB_MissingManifest(t *testing.T) { + rnd := rand.New(rand.NewSource(0x0badda7a)) + h := newDbCorruptHarnessWopt(t, &opt.Options{ + BlockCache: cache.NewLRUCache(100), + Strict: opt.StrictJournalChecksum, + WriteBuffer: 1000 * 60, + }) + + h.build(1000) + h.compactMem() + h.buildShuffled(1000, rnd) + h.compactMem() + h.deleteRand(500, 1000, rnd) + h.compactMem() + h.buildShuffled(1000, rnd) + h.compactMem() + h.deleteRand(500, 1000, rnd) + h.compactMem() + h.buildShuffled(1000, rnd) + h.compactMem() + h.closeDB() + + h.stor.SetIgnoreOpenErr(storage.TypeManifest) + h.removeAll(storage.TypeManifest) + h.openAssert(false) + h.stor.SetIgnoreOpenErr(0) + + h.recover() + h.check(1000, 1000) + h.build(1000) + h.compactMem() + h.compactRange("", "") + h.closeDB() + + h.recover() + h.check(1000, 1000) + + h.close() +} + +func TestCorruptDB_SequenceNumberRecovery(t *testing.T) { + h := newDbCorruptHarness(t) + + h.put("foo", "v1") + h.put("foo", "v2") + h.put("foo", "v3") + h.put("foo", "v4") + h.put("foo", "v5") + h.closeDB() + + h.recover() + h.getVal("foo", "v5") + h.put("foo", "v6") + h.getVal("foo", "v6") + + h.reopenDB() + h.getVal("foo", "v6") + + h.close() +} + +func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) { + h := newDbCorruptHarness(t) + + h.put("foo", "v1") + h.put("foo", "v2") + h.put("foo", "v3") + h.compactMem() + h.put("foo", "v4") + h.put("foo", "v5") + h.compactMem() + h.closeDB() + + h.recover() + h.getVal("foo", "v5") + h.put("foo", "v6") + h.getVal("foo", "v6") + + h.reopenDB() + h.getVal("foo", "v6") + + h.close() +} + +func TestCorruptDB_CorruptedManifest(t *testing.T) { + h := newDbCorruptHarness(t) + + h.put("foo", "hello") + h.compactMem() + h.compactRange("", "") + h.closeDB() + h.corrupt(storage.TypeManifest, 0, 1000) + h.openAssert(false) + + h.recover() + h.getVal("foo", "hello") + + h.close() +} + +func TestCorruptDB_CompactionInputError(t *testing.T) { + h := newDbCorruptHarness(t) + + h.build(10) + h.compactMem() + h.closeDB() + h.corrupt(storage.TypeTable, 100, 
1) + + h.openDB() + h.check(9, 9) + + h.build(10000) + h.check(10000, 10000) + + h.close() +} + +func TestCorruptDB_UnrelatedKeys(t *testing.T) { + h := newDbCorruptHarness(t) + + h.build(10) + h.compactMem() + h.closeDB() + h.corrupt(storage.TypeTable, 100, 1) + + h.openDB() + h.put(string(tkey(1000)), string(tval(1000, ctValSize))) + h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) + h.compactMem() + h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) + + h.close() +} + +func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) { + h := newDbCorruptHarness(t) + + h.put("a", "v1") + h.put("b", "v1") + h.compactMem() + h.put("a", "v2") + h.put("b", "v2") + h.compactMem() + h.put("a", "v3") + h.put("b", "v3") + h.compactMem() + h.put("c", "v0") + h.put("d", "v0") + h.compactMem() + h.compactRangeAt(1, "", "") + h.closeDB() + + h.recover() + h.getVal("a", "v3") + h.getVal("b", "v3") + h.getVal("c", "v0") + h.getVal("d", "v0") + + h.close() +} + +func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) { + h := newDbCorruptHarness(t) + + h.put("a", "v1") + h.put("b", "v1") + h.compactMem() + h.put("a", "v2") + h.put("b", "v2") + h.compactMem() + h.put("a", "v3") + h.put("b", "v3") + h.compactMem() + h.put("c", "v0") + h.put("d", "v0") + h.compactMem() + h.compactRangeAt(0, "", "") + h.closeDB() + + h.recover() + h.getVal("a", "v3") + h.getVal("b", "v3") + h.getVal("c", "v0") + h.getVal("d", "v0") + + h.close() +} + +func TestCorruptDB_MissingTableFiles(t *testing.T) { + h := newDbCorruptHarness(t) + + h.put("a", "v1") + h.put("b", "v1") + h.compactMem() + h.put("c", "v2") + h.put("d", "v2") + h.compactMem() + h.put("e", "v3") + h.put("f", "v3") + h.closeDB() + + h.removeOne(storage.TypeTable) + h.openAssert(false) + + h.close() +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go new file mode 100644 index 000000000..73a691218 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go @@ -0,0 +1,834 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "fmt" + "io" + "os" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// DB is a LevelDB database. +type DB struct { + // Need 64-bit alignment. + seq uint64 + + // Session. + s *session + + // MemDB. + memMu sync.RWMutex + memPool chan *memdb.DB + mem, frozenMem *memDB + journal *journal.Writer + journalWriter storage.Writer + journalFile storage.File + frozenJournalFile storage.File + frozenSeq uint64 + + // Snapshot. + snapsMu sync.Mutex + snapsRoot snapshotElement + + // Stats. + aliveSnaps, aliveIters int32 + + // Write. 
+ writeC chan *Batch + writeMergedC chan bool + writeLockC chan struct{} + writeAckC chan error + journalC chan *Batch + journalAckC chan error + + // Compaction. + tcompCmdC chan cCmd + tcompPauseC chan chan<- struct{} + tcompTriggerC chan struct{} + mcompCmdC chan cCmd + mcompTriggerC chan struct{} + compErrC chan error + compErrSetC chan error + compStats [kNumLevels]cStats + + // Close. + closeW sync.WaitGroup + closeC chan struct{} + closed uint32 + closer io.Closer +} + +func openDB(s *session) (*DB, error) { + s.log("db@open opening") + start := time.Now() + db := &DB{ + s: s, + // Initial sequence + seq: s.stSeq, + // MemDB + memPool: make(chan *memdb.DB, 1), + // Write + writeC: make(chan *Batch), + writeMergedC: make(chan bool), + writeLockC: make(chan struct{}, 1), + writeAckC: make(chan error), + journalC: make(chan *Batch), + journalAckC: make(chan error), + // Compaction + tcompCmdC: make(chan cCmd), + tcompPauseC: make(chan chan<- struct{}), + tcompTriggerC: make(chan struct{}, 1), + mcompCmdC: make(chan cCmd), + mcompTriggerC: make(chan struct{}, 1), + compErrC: make(chan error), + compErrSetC: make(chan error), + // Close + closeC: make(chan struct{}), + } + db.initSnapshot() + + if err := db.recoverJournal(); err != nil { + return nil, err + } + + // Remove any obsolete files. + if err := db.checkAndCleanFiles(); err != nil { + // Close journal. + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + } + return nil, err + } + + // Don't include compaction error goroutine into wait group. + go db.compactionError() + + db.closeW.Add(3) + go db.tCompaction() + go db.mCompaction() + go db.jWriter() + go db.mpoolDrain() + + s.logf("db@open done T·%v", time.Since(start)) + + runtime.SetFinalizer(db, (*DB).Close) + return db, nil +} + +// Open opens or creates a DB for the given storage. +// The DB will be created if not exist, unless ErrorIfMissing is true. +// Also, if ErrorIfExist is true and the DB exist Open will returns +// os.ErrExist error. +// +// Open will return an error with type of ErrCorrupted if corruption +// detected in the DB. Corrupted DB can be recovered with Recover +// function. +// +// The returned DB instance is goroutine-safe. +// The DB must be closed after use, by calling Close method. +func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) { + s, err := newSession(stor, o) + if err != nil { + return + } + defer func() { + if err != nil { + s.close() + s.release() + } + }() + + err = s.recover() + if err != nil { + if !os.IsNotExist(err) || s.o.GetErrorIfMissing() { + return + } + err = s.create() + if err != nil { + return + } + } else if s.o.GetErrorIfExist() { + err = os.ErrExist + return + } + + return openDB(s) +} + +// OpenFile opens or creates a DB for the given path. +// The DB will be created if not exist, unless ErrorIfMissing is true. +// Also, if ErrorIfExist is true and the DB exist OpenFile will returns +// os.ErrExist error. +// +// OpenFile uses standard file-system backed storage implementation as +// desribed in the leveldb/storage package. +// +// OpenFile will return an error with type of ErrCorrupted if corruption +// detected in the DB. Corrupted DB can be recovered with Recover +// function. +// +// The returned DB instance is goroutine-safe. +// The DB must be closed after use, by calling Close method. 
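A minimal usage sketch of the open/put/get/close API documented above, assuming the vendored import path recorded in this patch; the on-disk path and keys are illustrative and error handling is abbreviated.

    package main

    import (
        "log"

        "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb"
    )

    func main() {
        // OpenFile creates the DB at the given path if it does not exist yet.
        db, err := leveldb.OpenFile("/tmp/example-db", nil) // nil selects default options
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // Put and Get round-trip a key/value pair.
        if err := db.Put([]byte("greeting"), []byte("hello"), nil); err != nil {
            log.Fatal(err)
        }
        value, err := db.Get([]byte("greeting"), nil)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("greeting=%s", value)
    }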
+func OpenFile(path string, o *opt.Options) (db *DB, err error) { + stor, err := storage.OpenFile(path) + if err != nil { + return + } + db, err = Open(stor, o) + if err != nil { + stor.Close() + } else { + db.closer = stor + } + return +} + +// Recover recovers and opens a DB with missing or corrupted manifest files +// for the given storage. It will ignore any manifest files, valid or not. +// The DB must already exist or it will returns an error. +// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. +// +// The returned DB instance is goroutine-safe. +// The DB must be closed after use, by calling Close method. +func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) { + s, err := newSession(stor, o) + if err != nil { + return + } + defer func() { + if err != nil { + s.close() + s.release() + } + }() + + err = recoverTable(s, o) + if err != nil { + return + } + return openDB(s) +} + +// RecoverFile recovers and opens a DB with missing or corrupted manifest files +// for the given path. It will ignore any manifest files, valid or not. +// The DB must already exist or it will returns an error. +// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. +// +// RecoverFile uses standard file-system backed storage implementation as desribed +// in the leveldb/storage package. +// +// The returned DB instance is goroutine-safe. +// The DB must be closed after use, by calling Close method. +func RecoverFile(path string, o *opt.Options) (db *DB, err error) { + stor, err := storage.OpenFile(path) + if err != nil { + return + } + db, err = Recover(stor, o) + if err != nil { + stor.Close() + } else { + db.closer = stor + } + return +} + +func recoverTable(s *session, o *opt.Options) error { + // Get all tables and sort it by file number. + tableFiles_, err := s.getFiles(storage.TypeTable) + if err != nil { + return err + } + tableFiles := files(tableFiles_) + tableFiles.sort() + + var mSeq uint64 + var good, corrupted int + rec := new(sessionRecord) + bpool := util.NewBufferPool(o.GetBlockSize() + 5) + buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) { + tmp = s.newTemp() + writer, err := tmp.Create() + if err != nil { + return + } + defer func() { + writer.Close() + if err != nil { + tmp.Remove() + tmp = nil + } + }() + + // Copy entries. + tw := table.NewWriter(writer, o) + for iter.Next() { + key := iter.Key() + if validIkey(key) { + err = tw.Append(key, iter.Value()) + if err != nil { + return + } + } + } + err = iter.Error() + if err != nil { + return + } + err = tw.Close() + if err != nil { + return + } + err = writer.Sync() + if err != nil { + return + } + size = int64(tw.BytesLen()) + return + } + recoverTable := func(file storage.File) error { + s.logf("table@recovery recovering @%d", file.Num()) + reader, err := file.Open() + if err != nil { + return err + } + defer reader.Close() + + // Get file size. + size, err := reader.Seek(0, 2) + if err != nil { + return err + } + + var tSeq uint64 + var tgood, tcorrupted, blockerr int + var imin, imax []byte + tr := table.NewReader(reader, size, nil, bpool, o) + iter := tr.NewIterator(nil, nil) + iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) { + s.logf("table@recovery found error @%d %q", file.Num(), err) + blockerr++ + }) + + // Scan the table. 
+ for iter.Next() { + key := iter.Key() + _, seq, _, ok := parseIkey(key) + if !ok { + tcorrupted++ + continue + } + tgood++ + if seq > tSeq { + tSeq = seq + } + if imin == nil { + imin = append([]byte{}, key...) + } + imax = append(imax[:0], key...) + } + if err := iter.Error(); err != nil { + iter.Release() + return err + } + iter.Release() + + if tgood > 0 { + if tcorrupted > 0 || blockerr > 0 { + // Rebuild the table. + s.logf("table@recovery rebuilding @%d", file.Num()) + iter := tr.NewIterator(nil, nil) + tmp, newSize, err := buildTable(iter) + iter.Release() + if err != nil { + return err + } + reader.Close() + if err := file.Replace(tmp); err != nil { + return err + } + size = newSize + } + if tSeq > mSeq { + mSeq = tSeq + } + // Add table to level 0. + rec.addTable(0, file.Num(), uint64(size), imin, imax) + s.logf("table@recovery recovered @%d N·%d C·%d B·%d S·%d Q·%d", file.Num(), tgood, tcorrupted, blockerr, size, tSeq) + } else { + s.logf("table@recovery unrecoverable @%d C·%d B·%d S·%d", file.Num(), tcorrupted, blockerr, size) + } + + good += tgood + corrupted += tcorrupted + + return nil + } + + // Recover all tables. + if len(tableFiles) > 0 { + s.logf("table@recovery F·%d", len(tableFiles)) + + // Mark file number as used. + s.markFileNum(tableFiles[len(tableFiles)-1].Num()) + + for _, file := range tableFiles { + if err := recoverTable(file); err != nil { + return err + } + } + + s.logf("table@recovery recovered F·%d N·%d C·%d Q·%d", len(tableFiles), good, corrupted, mSeq) + } + + // Set sequence number. + rec.setSeq(mSeq + 1) + + // Create new manifest. + if err := s.create(); err != nil { + return err + } + + // Commit. + return s.commit(rec) +} + +func (db *DB) recoverJournal() error { + // Get all tables and sort it by file number. + journalFiles_, err := db.s.getFiles(storage.TypeJournal) + if err != nil { + return err + } + journalFiles := files(journalFiles_) + journalFiles.sort() + + // Discard older journal. + prev := -1 + for i, file := range journalFiles { + if file.Num() >= db.s.stJournalNum { + if prev >= 0 { + i-- + journalFiles[i] = journalFiles[prev] + } + journalFiles = journalFiles[i:] + break + } else if file.Num() == db.s.stPrevJournalNum { + prev = i + } + } + + var jr *journal.Reader + var of storage.File + var mem *memdb.DB + batch := new(Batch) + cm := newCMem(db.s) + buf := new(util.Buffer) + // Options. + strict := db.s.o.GetStrict(opt.StrictJournal) + checksum := db.s.o.GetStrict(opt.StrictJournalChecksum) + writeBuffer := db.s.o.GetWriteBuffer() + recoverJournal := func(file storage.File) error { + db.logf("journal@recovery recovering @%d", file.Num()) + reader, err := file.Open() + if err != nil { + return err + } + defer reader.Close() + + // Create/reset journal reader instance. + if jr == nil { + jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum) + } else { + jr.Reset(reader, dropper{db.s, file}, strict, checksum) + } + + // Flush memdb and remove obsolete journal file. + if of != nil { + if mem.Len() > 0 { + if err := cm.flush(mem, 0); err != nil { + return err + } + } + if err := cm.commit(file.Num(), db.seq); err != nil { + return err + } + cm.reset() + of.Remove() + of = nil + } + + // Replay journal to memdb. 
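// Each record read from the journal is decoded into a Batch and replayed into
// the memdb; db.seq is advanced past every replayed batch, and the memdb is
// flushed to a level-0 table whenever it exceeds the configured write buffer,
// keeping memory use bounded during recovery.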
+ mem.Reset() + for { + r, err := jr.Next() + if err != nil { + if err == io.EOF { + break + } + return err + } + + buf.Reset() + if _, err := buf.ReadFrom(r); err != nil { + if err == io.ErrUnexpectedEOF { + continue + } else { + return err + } + } + if err := batch.decode(buf.Bytes()); err != nil { + return err + } + if err := batch.memReplay(mem); err != nil { + return err + } + + // Save sequence number. + db.seq = batch.seq + uint64(batch.len()) + + // Flush it if large enough. + if mem.Size() >= writeBuffer { + if err := cm.flush(mem, 0); err != nil { + return err + } + mem.Reset() + } + } + + of = file + return nil + } + + // Recover all journals. + if len(journalFiles) > 0 { + db.logf("journal@recovery F·%d", len(journalFiles)) + + // Mark file number as used. + db.s.markFileNum(journalFiles[len(journalFiles)-1].Num()) + + mem = memdb.New(db.s.icmp, writeBuffer) + for _, file := range journalFiles { + if err := recoverJournal(file); err != nil { + return err + } + } + + // Flush the last journal. + if mem.Len() > 0 { + if err := cm.flush(mem, 0); err != nil { + return err + } + } + } + + // Create a new journal. + if _, err := db.newMem(0); err != nil { + return err + } + + // Commit. + if err := cm.commit(db.journalFile.Num(), db.seq); err != nil { + // Close journal. + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + } + return err + } + + // Remove the last obsolete journal file. + if of != nil { + of.Remove() + } + + return nil +} + +func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { + ikey := newIKey(key, seq, tSeek) + + em, fm := db.getMems() + for _, m := range [...]*memDB{em, fm} { + if m == nil { + continue + } + defer m.decref() + + mk, mv, me := m.mdb.Find(ikey) + if me == nil { + ukey, _, t, ok := parseIkey(mk) + if ok && db.s.icmp.uCompare(ukey, key) == 0 { + if t == tDel { + return nil, ErrNotFound + } + return append([]byte{}, mv...), nil + } + } else if me != ErrNotFound { + return nil, me + } + } + + v := db.s.version() + value, cSched, err := v.get(ikey, ro) + v.release() + if cSched { + // Trigger table compaction. + db.compTrigger(db.tcompTriggerC) + } + return +} + +// Get gets the value for the given key. It returns ErrNotFound if the +// DB does not contain the key. +// +// The returned slice is its own copy, it is safe to modify the contents +// of the returned slice. +// It is safe to modify the contents of the argument after Get returns. +func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { + err = db.ok() + if err != nil { + return + } + + return db.get(key, db.getSeq(), ro) +} + +// NewIterator returns an iterator for the latest snapshot of the +// uderlying DB. +// The returned iterator is not goroutine-safe, but it is safe to use +// multiple iterators concurrently, with each in a dedicated goroutine. +// It is also safe to use an iterator concurrently with modifying its +// underlying DB. The resultant key/value pairs are guaranteed to be +// consistent. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// DB. And a nil Range.Limit is treated as a key after all keys in +// the DB. +// +// The iterator must be released after use, by calling Release method. +// +// Also read Iterator documentation of the leveldb/iterator package. 
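A short sketch of the iterator usage described above, assuming the vendored leveldb and leveldb/util import paths; the database path and key range are illustrative.

    package main

    import (
        "fmt"
        "log"

        "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb"
        "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
    )

    func main() {
        db, err := leveldb.OpenFile("/tmp/example-db", nil) // illustrative path
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // Iterate only over keys in ["a", "m"); per the documentation above,
        // the iterator must be released and its Error checked after the loop.
        iter := db.NewIterator(&util.Range{Start: []byte("a"), Limit: []byte("m")}, nil)
        for iter.Next() {
            fmt.Printf("%s = %s\n", iter.Key(), iter.Value())
        }
        iter.Release()
        if err := iter.Error(); err != nil {
            log.Fatal(err)
        }
    }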
+func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + if err := db.ok(); err != nil { + return iterator.NewEmptyIterator(err) + } + + snap := db.newSnapshot() + defer snap.Release() + return snap.NewIterator(slice, ro) +} + +// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot +// is a frozen snapshot of a DB state at a particular point in time. The +// content of snapshot are guaranteed to be consistent. +// +// The snapshot must be released after use, by calling Release method. +func (db *DB) GetSnapshot() (*Snapshot, error) { + if err := db.ok(); err != nil { + return nil, err + } + + return db.newSnapshot(), nil +} + +// GetProperty returns value of the given property name. +// +// Property names: +// leveldb.num-files-at-level{n} +// Returns the number of filer at level 'n'. +// leveldb.stats +// Returns statistics of the underlying DB. +// leveldb.sstables +// Returns sstables list for each level. +// leveldb.blockpool +// Returns block pool stats. +// leveldb.cachedblock +// Returns size of cached block. +// leveldb.openedtables +// Returns number of opened tables. +// leveldb.alivesnaps +// Returns number of alive snapshots. +// leveldb.aliveiters +// Returns number of alive iterators. +func (db *DB) GetProperty(name string) (value string, err error) { + err = db.ok() + if err != nil { + return + } + + const prefix = "leveldb." + if !strings.HasPrefix(name, prefix) { + return "", errors.New("leveldb: GetProperty: unknown property: " + name) + } + p := name[len(prefix):] + + v := db.s.version() + defer v.release() + + switch { + case strings.HasPrefix(p, "num-files-at-level"): + var level uint + var rest string + n, _ := fmt.Scanf("%d%s", &level, &rest) + if n != 1 || level >= kNumLevels { + err = errors.New("leveldb: GetProperty: invalid property: " + name) + } else { + value = fmt.Sprint(v.tLen(int(level))) + } + case p == "stats": + value = "Compactions\n" + + " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" + + "-------+------------+---------------+---------------+---------------+---------------\n" + for level, tables := range v.tables { + duration, read, write := db.compStats[level].get() + if len(tables) == 0 && duration == 0 { + continue + } + value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n", + level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(), + float64(read)/1048576.0, float64(write)/1048576.0) + } + case p == "sstables": + for level, tables := range v.tables { + value += fmt.Sprintf("--- level %d ---\n", level) + for _, t := range tables { + value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax) + } + } + case p == "blockpool": + value = fmt.Sprintf("%v", db.s.tops.bpool) + case p == "cachedblock": + if bc := db.s.o.GetBlockCache(); bc != nil { + value = fmt.Sprintf("%d", bc.Size()) + } else { + value = "" + } + case p == "openedtables": + value = fmt.Sprintf("%d", db.s.tops.cache.Size()) + case p == "alivesnaps": + value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps)) + case p == "aliveiters": + value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters)) + default: + err = errors.New("leveldb: GetProperty: unknown property: " + name) + } + + return +} + +// SizeOf calculates approximate sizes of the given key ranges. +// The length of the returned sizes are equal with the length of the given +// ranges. 
The returned sizes measure storage space usage, so if the user +// data compresses by a factor of ten, the returned sizes will be one-tenth +// the size of the corresponding user data size. +// The results may not include the sizes of recently written data. +func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) { + if err := db.ok(); err != nil { + return nil, err + } + + v := db.s.version() + defer v.release() + + sizes := make(Sizes, 0, len(ranges)) + for _, r := range ranges { + imin := newIKey(r.Start, kMaxSeq, tSeek) + imax := newIKey(r.Limit, kMaxSeq, tSeek) + start, err := v.offsetOf(imin) + if err != nil { + return nil, err + } + limit, err := v.offsetOf(imax) + if err != nil { + return nil, err + } + var size uint64 + if limit >= start { + size = limit - start + } + sizes = append(sizes, size) + } + + return sizes, nil +} + +// Close closes the DB. This will also releases any outstanding snapshot and +// abort any in-flight compaction. +// +// It is not safe to close a DB until all outstanding iterators are released. +// It is valid to call Close multiple times. Other methods should not be +// called after the DB has been closed. +func (db *DB) Close() error { + if !db.setClosed() { + return ErrClosed + } + + start := time.Now() + db.log("db@close closing") + + // Clear the finalizer. + runtime.SetFinalizer(db, nil) + + // Get compaction error. + var err error + select { + case err = <-db.compErrC: + default: + } + + close(db.closeC) + + // Wait for the close WaitGroup. + db.closeW.Wait() + + // Close journal. + db.writeLockC <- struct{}{} + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + } + + // Close session. + db.s.close() + db.logf("db@close done T·%v", time.Since(start)) + db.s.release() + + if db.closer != nil { + if err1 := db.closer.Close(); err == nil { + err = err1 + } + } + + // NIL'ing pointers. + db.s = nil + db.mem = nil + db.frozenMem = nil + db.journal = nil + db.journalWriter = nil + db.journalFile = nil + db.frozenJournalFile = nil + db.snapsRoot = snapshotElement{} + db.closer = nil + + return err +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go new file mode 100644 index 000000000..ad385e91e --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go @@ -0,0 +1,689 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
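A brief sketch of the GetProperty and SizeOf helpers documented in db.go above, assuming the vendored import paths; the property name and key range are illustrative.

    package main

    import (
        "fmt"
        "log"

        "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb"
        "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
    )

    func main() {
        db, err := leveldb.OpenFile("/tmp/example-db", nil) // illustrative path
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // Compaction statistics, one of the property names listed in the
        // GetProperty documentation above.
        stats, err := db.GetProperty("leveldb.stats")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(stats)

        // Approximate on-disk size of a single key range.
        sizes, err := db.SizeOf([]util.Range{{Start: []byte("a"), Limit: []byte("z")}})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("approximate size:", sizes.Sum(), "bytes")
    }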
+ +package leveldb + +import ( + "errors" + "sync" + "time" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" +) + +var ( + errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") +) + +type cStats struct { + sync.Mutex + duration time.Duration + read uint64 + write uint64 +} + +func (p *cStats) add(n *cStatsStaging) { + p.Lock() + p.duration += n.duration + p.read += n.read + p.write += n.write + p.Unlock() +} + +func (p *cStats) get() (duration time.Duration, read, write uint64) { + p.Lock() + defer p.Unlock() + return p.duration, p.read, p.write +} + +type cStatsStaging struct { + start time.Time + duration time.Duration + on bool + read uint64 + write uint64 +} + +func (p *cStatsStaging) startTimer() { + if !p.on { + p.start = time.Now() + p.on = true + } +} + +func (p *cStatsStaging) stopTimer() { + if p.on { + p.duration += time.Since(p.start) + p.on = false + } +} + +type cMem struct { + s *session + level int + rec *sessionRecord +} + +func newCMem(s *session) *cMem { + return &cMem{s: s, rec: new(sessionRecord)} +} + +func (c *cMem) flush(mem *memdb.DB, level int) error { + s := c.s + + // Write memdb to table. + iter := mem.NewIterator(nil) + defer iter.Release() + t, n, err := s.tops.createFrom(iter) + if err != nil { + return err + } + + // Pick level. + if level < 0 { + level = s.version_NB().pickLevel(t.imin.ukey(), t.imax.ukey()) + } + c.rec.addTableFile(level, t) + + s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax) + + c.level = level + return nil +} + +func (c *cMem) reset() { + c.rec = new(sessionRecord) +} + +func (c *cMem) commit(journal, seq uint64) error { + c.rec.setJournalNum(journal) + c.rec.setSeq(seq) + + // Commit changes. + return c.s.commit(c.rec) +} + +func (db *DB) compactionError() { + var err error +noerr: + for { + select { + case err = <-db.compErrSetC: + if err != nil { + goto haserr + } + case _, _ = <-db.closeC: + return + } + } +haserr: + for { + select { + case db.compErrC <- err: + case err = <-db.compErrSetC: + if err == nil { + goto noerr + } + case _, _ = <-db.closeC: + return + } + } +} + +type compactionTransactCounter int + +func (cnt *compactionTransactCounter) incr() { + *cnt++ +} + +func (db *DB) compactionTransact(name string, exec func(cnt *compactionTransactCounter) error, rollback func() error) { + defer func() { + if x := recover(); x != nil { + if x == errCompactionTransactExiting && rollback != nil { + if err := rollback(); err != nil { + db.logf("%s rollback error %q", name, err) + } + } + panic(x) + } + }() + + const ( + backoffMin = 1 * time.Second + backoffMax = 8 * time.Second + backoffMul = 2 * time.Second + ) + backoff := backoffMin + backoffT := time.NewTimer(backoff) + lastCnt := compactionTransactCounter(0) + for n := 0; ; n++ { + // Check wether the DB is closed. + if db.isClosed() { + db.logf("%s exiting", name) + db.compactionExitTransact() + } else if n > 0 { + db.logf("%s retrying N·%d", name, n) + } + + // Execute. + cnt := compactionTransactCounter(0) + err := exec(&cnt) + + // Set compaction error status. + select { + case db.compErrSetC <- err: + case _, _ = <-db.closeC: + db.logf("%s exiting", name) + db.compactionExitTransact() + } + if err == nil { + return + } + db.logf("%s error I·%d %q", name, cnt, err) + + // Reset backoff duration if counter is advancing. + if cnt > lastCnt { + backoff = backoffMin + lastCnt = cnt + } + + // Backoff. 
+ backoffT.Reset(backoff) + if backoff < backoffMax { + backoff *= backoffMul + if backoff > backoffMax { + backoff = backoffMax + } + } + select { + case <-backoffT.C: + case _, _ = <-db.closeC: + db.logf("%s exiting", name) + db.compactionExitTransact() + } + } +} + +func (db *DB) compactionExitTransact() { + panic(errCompactionTransactExiting) +} + +func (db *DB) memCompaction() { + mem := db.getFrozenMem() + if mem == nil { + return + } + defer mem.decref() + + c := newCMem(db.s) + stats := new(cStatsStaging) + + db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size())) + + // Don't compact empty memdb. + if mem.mdb.Len() == 0 { + db.logf("mem@flush skipping") + // drop frozen mem + db.dropFrozenMem() + return + } + + // Pause table compaction. + ch := make(chan struct{}) + select { + case db.tcompPauseC <- (chan<- struct{})(ch): + case _, _ = <-db.closeC: + return + } + + db.compactionTransact("mem@flush", func(cnt *compactionTransactCounter) (err error) { + stats.startTimer() + defer stats.stopTimer() + return c.flush(mem.mdb, -1) + }, func() error { + for _, r := range c.rec.addedTables { + db.logf("mem@flush rollback @%d", r.num) + f := db.s.getTableFile(r.num) + if err := f.Remove(); err != nil { + return err + } + } + return nil + }) + + db.compactionTransact("mem@commit", func(cnt *compactionTransactCounter) (err error) { + stats.startTimer() + defer stats.stopTimer() + return c.commit(db.journalFile.Num(), db.frozenSeq) + }, nil) + + db.logf("mem@flush commited F·%d T·%v", len(c.rec.addedTables), stats.duration) + + for _, r := range c.rec.addedTables { + stats.write += r.size + } + db.compStats[c.level].add(stats) + + // Drop frozen mem. + db.dropFrozenMem() + + // Resume table compaction. + select { + case <-ch: + case _, _ = <-db.closeC: + return + } + + // Trigger table compaction. + db.compTrigger(db.mcompTriggerC) +} + +func (db *DB) tableCompaction(c *compaction, noTrivial bool) { + rec := new(sessionRecord) + rec.addCompactionPointer(c.level, c.imax) + + if !noTrivial && c.trivial() { + t := c.tables[0][0] + db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1) + rec.deleteTable(c.level, t.file.Num()) + rec.addTableFile(c.level+1, t) + db.compactionTransact("table@move", func(cnt *compactionTransactCounter) (err error) { + return db.s.commit(rec) + }, nil) + return + } + + var stats [2]cStatsStaging + for i, tables := range c.tables { + for _, t := range tables { + stats[i].read += t.size + // Insert deleted tables into record + rec.deleteTable(c.level+i, t.file.Num()) + } + } + sourceSize := int(stats[0].read + stats[1].read) + minSeq := db.minSeq() + db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq) + + var snapUkey []byte + var snapHasUkey bool + var snapSeq uint64 + var snapIter int + var snapDropCnt int + var dropCnt int + db.compactionTransact("table@build", func(cnt *compactionTransactCounter) (err error) { + ukey := append([]byte{}, snapUkey...) 
+ hasUkey := snapHasUkey + lseq := snapSeq + dropCnt = snapDropCnt + snapSched := snapIter == 0 + + var tw *tWriter + finish := func() error { + t, err := tw.finish() + if err != nil { + return err + } + rec.addTableFile(c.level+1, t) + stats[1].write += t.size + db.logf("table@build created L%d@%d N·%d S·%s %q:%q", c.level+1, t.file.Num(), tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) + return nil + } + + defer func() { + stats[1].stopTimer() + if tw != nil { + tw.drop() + tw = nil + } + }() + + stats[1].startTimer() + iter := c.newIterator() + defer iter.Release() + for i := 0; iter.Next(); i++ { + // Incr transact counter. + cnt.incr() + + // Skip until last state. + if i < snapIter { + continue + } + + ikey := iKey(iter.Key()) + + if c.shouldStopBefore(ikey) && tw != nil { + err = finish() + if err != nil { + return + } + snapSched = true + tw = nil + } + + // Scheduled for snapshot, snapshot will used to retry compaction + // if error occured. + if snapSched { + snapUkey = append(snapUkey[:0], ukey...) + snapHasUkey = hasUkey + snapSeq = lseq + snapIter = i + snapDropCnt = dropCnt + snapSched = false + } + + if seq, vt, ok := ikey.parseNum(); !ok { + // Don't drop error keys + ukey = ukey[:0] + hasUkey = false + lseq = kMaxSeq + } else { + if !hasUkey || db.s.icmp.uCompare(ikey.ukey(), ukey) != 0 { + // First occurrence of this user key + ukey = append(ukey[:0], ikey.ukey()...) + hasUkey = true + lseq = kMaxSeq + } + + drop := false + if lseq <= minSeq { + // Dropped because newer entry for same user key exist + drop = true // (A) + } else if vt == tDel && seq <= minSeq && c.baseLevelForKey(ukey) { + // For this user key: + // (1) there is no data in higher levels + // (2) data in lower levels will have larger seq numbers + // (3) data in layers that are being compacted here and have + // smaller seq numbers will be dropped in the next + // few iterations of this loop (by rule (A) above). + // Therefore this deletion marker is obsolete and can be dropped. + drop = true + } + + lseq = seq + if drop { + dropCnt++ + continue + } + } + + // Create new table if not already + if tw == nil { + // Check for pause event. + select { + case ch := <-db.tcompPauseC: + db.pauseCompaction(ch) + case _, _ = <-db.closeC: + db.compactionExitTransact() + default: + } + + // Create new table. 
+ tw, err = db.s.tops.create() + if err != nil { + return + } + } + + // Write key/value into table + err = tw.append(ikey, iter.Value()) + if err != nil { + return + } + + // Finish table if it is big enough + if tw.tw.BytesLen() >= kMaxTableSize { + err = finish() + if err != nil { + return + } + snapSched = true + tw = nil + } + } + + err = iter.Error() + if err != nil { + return + } + + // Finish last table + if tw != nil && !tw.empty() { + err = finish() + if err != nil { + return + } + tw = nil + } + return + }, func() error { + for _, r := range rec.addedTables { + db.logf("table@build rollback @%d", r.num) + f := db.s.getTableFile(r.num) + if err := f.Remove(); err != nil { + return err + } + } + return nil + }) + + // Commit changes + db.compactionTransact("table@commit", func(cnt *compactionTransactCounter) (err error) { + stats[1].startTimer() + defer stats[1].stopTimer() + return db.s.commit(rec) + }, nil) + + resultSize := int(stats[1].write) + db.logf("table@compaction commited F%s S%s D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), dropCnt, stats[1].duration) + + // Save compaction stats + for i := range stats { + db.compStats[c.level+1].add(&stats[i]) + } +} + +func (db *DB) tableRangeCompaction(level int, umin, umax []byte) { + db.logf("table@compaction range L%d %q:%q", level, umin, umax) + + if level >= 0 { + if c := db.s.getCompactionRange(level, umin, umax); c != nil { + db.tableCompaction(c, true) + } + } else { + v := db.s.version_NB() + + m := 1 + for i, t := range v.tables[1:] { + if t.overlaps(db.s.icmp, umin, umax, false) { + m = i + 1 + } + } + + for level := 0; level < m; level++ { + if c := db.s.getCompactionRange(level, umin, umax); c != nil { + db.tableCompaction(c, true) + } + } + } +} + +func (db *DB) tableAutoCompaction() { + if c := db.s.pickCompaction(); c != nil { + db.tableCompaction(c, false) + } +} + +func (db *DB) tableNeedCompaction() bool { + return db.s.version_NB().needCompaction() +} + +func (db *DB) pauseCompaction(ch chan<- struct{}) { + select { + case ch <- struct{}{}: + case _, _ = <-db.closeC: + db.compactionExitTransact() + } +} + +type cCmd interface { + ack(err error) +} + +type cIdle struct { + ackC chan<- error +} + +func (r cIdle) ack(err error) { + r.ackC <- err +} + +type cRange struct { + level int + min, max []byte + ackC chan<- error +} + +func (r cRange) ack(err error) { + defer func() { + recover() + }() + if r.ackC != nil { + r.ackC <- err + } +} + +func (db *DB) compSendIdle(compC chan<- cCmd) error { + ch := make(chan error) + defer close(ch) + // Send cmd. + select { + case compC <- cIdle{ch}: + case err := <-db.compErrC: + return err + case _, _ = <-db.closeC: + return ErrClosed + } + // Wait cmd. + return <-ch +} + +func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) { + ch := make(chan error) + defer close(ch) + // Send cmd. + select { + case compC <- cRange{level, min, max, ch}: + case err := <-db.compErrC: + return err + case _, _ = <-db.closeC: + return ErrClosed + } + // Wait cmd. 
+ select { + case err = <-db.compErrC: + case err = <-ch: + } + return err +} + +func (db *DB) compTrigger(compTriggerC chan struct{}) { + select { + case compTriggerC <- struct{}{}: + default: + } +} + +func (db *DB) mCompaction() { + var x cCmd + + defer func() { + if x := recover(); x != nil { + if x != errCompactionTransactExiting { + panic(x) + } + } + if x != nil { + x.ack(ErrClosed) + } + db.closeW.Done() + }() + + for { + select { + case x = <-db.mcompCmdC: + db.memCompaction() + x.ack(nil) + x = nil + case <-db.mcompTriggerC: + db.memCompaction() + case _, _ = <-db.closeC: + return + } + } +} + +func (db *DB) tCompaction() { + var x cCmd + var ackQ []cCmd + + defer func() { + if x := recover(); x != nil { + if x != errCompactionTransactExiting { + panic(x) + } + } + for i := range ackQ { + ackQ[i].ack(ErrClosed) + ackQ[i] = nil + } + if x != nil { + x.ack(ErrClosed) + } + db.closeW.Done() + }() + + for { + if db.tableNeedCompaction() { + select { + case x = <-db.tcompCmdC: + case <-db.tcompTriggerC: + case ch := <-db.tcompPauseC: + db.pauseCompaction(ch) + continue + case _, _ = <-db.closeC: + return + default: + } + } else { + for i := range ackQ { + ackQ[i].ack(nil) + ackQ[i] = nil + } + ackQ = ackQ[:0] + select { + case x = <-db.tcompCmdC: + case <-db.tcompTriggerC: + case ch := <-db.tcompPauseC: + db.pauseCompaction(ch) + continue + case _, _ = <-db.closeC: + return + } + } + if x != nil { + switch cmd := x.(type) { + case cIdle: + ackQ = append(ackQ, x) + case cRange: + db.tableRangeCompaction(cmd.level, cmd.min, cmd.max) + x.ack(nil) + } + x = nil + } + db.tableAutoCompaction() + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go new file mode 100644 index 000000000..120e6b0ed --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go @@ -0,0 +1,331 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "runtime" + "sync" + "sync/atomic" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key") +) + +type memdbReleaser struct { + once sync.Once + m *memDB +} + +func (mr *memdbReleaser) Release() { + mr.once.Do(func() { + mr.m.decref() + }) +} + +func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + em, fm := db.getMems() + v := db.s.version() + + ti := v.getIterators(slice, ro) + n := len(ti) + 2 + i := make([]iterator.Iterator, 0, n) + emi := em.mdb.NewIterator(slice) + emi.SetReleaser(&memdbReleaser{m: em}) + i = append(i, emi) + if fm != nil { + fmi := fm.mdb.NewIterator(slice) + fmi.SetReleaser(&memdbReleaser{m: fm}) + i = append(i, fmi) + } + i = append(i, ti...) 
+ strict := db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator) + mi := iterator.NewMergedIterator(i, db.s.icmp, strict) + mi.SetReleaser(&versionReleaser{v: v}) + return mi +} + +func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { + var islice *util.Range + if slice != nil { + islice = &util.Range{} + if slice.Start != nil { + islice.Start = newIKey(slice.Start, kMaxSeq, tSeek) + } + if slice.Limit != nil { + islice.Limit = newIKey(slice.Limit, kMaxSeq, tSeek) + } + } + rawIter := db.newRawIterator(islice, ro) + iter := &dbIter{ + db: db, + icmp: db.s.icmp, + iter: rawIter, + seq: seq, + strict: db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator), + key: make([]byte, 0), + value: make([]byte, 0), + } + atomic.AddInt32(&db.aliveIters, 1) + runtime.SetFinalizer(iter, (*dbIter).Release) + return iter +} + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +// dbIter represent an interator states over a database session. +type dbIter struct { + db *DB + icmp *iComparer + iter iterator.Iterator + seq uint64 + strict bool + + dir dir + key []byte + value []byte + err error + releaser util.Releaser +} + +func (i *dbIter) setErr(err error) { + i.err = err + i.key = nil + i.value = nil +} + +func (i *dbIter) iterErr() { + if err := i.iter.Error(); err != nil { + i.setErr(err) + } +} + +func (i *dbIter) Valid() bool { + return i.err == nil && i.dir > dirEOI +} + +func (i *dbIter) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.iter.First() { + i.dir = dirSOI + return i.next() + } + i.dir = dirEOI + i.iterErr() + return false +} + +func (i *dbIter) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.iter.Last() { + return i.prev() + } + i.dir = dirSOI + i.iterErr() + return false +} + +func (i *dbIter) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + ikey := newIKey(key, i.seq, tSeek) + if i.iter.Seek(ikey) { + i.dir = dirSOI + return i.next() + } + i.dir = dirEOI + i.iterErr() + return false +} + +func (i *dbIter) next() bool { + for { + ukey, seq, t, ok := parseIkey(i.iter.Key()) + if ok { + if seq <= i.seq { + switch t { + case tDel: + // Skip deleted key. + i.key = append(i.key[:0], ukey...) + i.dir = dirForward + case tVal: + if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { + i.key = append(i.key[:0], ukey...) + i.value = append(i.value[:0], i.iter.Value()...) + i.dir = dirForward + return true + } + } + } + } else if i.strict { + i.setErr(errInvalidIkey) + break + } + if !i.iter.Next() { + i.dir = dirEOI + i.iterErr() + break + } + } + return false +} + +func (i *dbIter) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) { + i.dir = dirEOI + i.iterErr() + return false + } + return i.next() +} + +func (i *dbIter) prev() bool { + i.dir = dirBackward + del := true + if i.iter.Valid() { + for { + ukey, seq, t, ok := parseIkey(i.iter.Key()) + if ok { + if seq <= i.seq { + if !del && i.icmp.uCompare(ukey, i.key) < 0 { + return true + } + del = (t == tDel) + if !del { + i.key = append(i.key[:0], ukey...) 
+ i.value = append(i.value[:0], i.iter.Value()...) + } + } + } else if i.strict { + i.setErr(errInvalidIkey) + return false + } + if !i.iter.Prev() { + break + } + } + } + if del { + i.dir = dirSOI + i.iterErr() + return false + } + return true +} + +func (i *dbIter) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirEOI: + return i.Last() + case dirForward: + for i.iter.Prev() { + ukey, _, _, ok := parseIkey(i.iter.Key()) + if ok { + if i.icmp.uCompare(ukey, i.key) < 0 { + goto cont + } + } else if i.strict { + i.setErr(errInvalidIkey) + return false + } + } + i.dir = dirSOI + i.iterErr() + return false + } + +cont: + return i.prev() +} + +func (i *dbIter) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.key +} + +func (i *dbIter) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.value +} + +func (i *dbIter) Release() { + if i.dir != dirReleased { + // Clear the finalizer. + runtime.SetFinalizer(i, nil) + + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + + i.dir = dirReleased + i.key = nil + i.value = nil + i.iter.Release() + i.iter = nil + atomic.AddInt32(&i.db.aliveIters, -1) + i.db = nil + } +} + +func (i *dbIter) SetReleaser(releaser util.Releaser) { + if i.dir != dirReleased { + i.releaser = releaser + } +} + +func (i *dbIter) Error() error { + return i.err +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go new file mode 100644 index 000000000..d7625b1a6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go @@ -0,0 +1,169 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "runtime" + "sync" + "sync/atomic" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +type snapshotElement struct { + seq uint64 + ref int + // Next and previous pointers in the doubly-linked list of elements. + next, prev *snapshotElement +} + +// Initialize the snapshot. +func (db *DB) initSnapshot() { + db.snapsRoot.next = &db.snapsRoot + db.snapsRoot.prev = &db.snapsRoot +} + +// Acquires a snapshot, based on latest sequence. +func (db *DB) acquireSnapshot() *snapshotElement { + db.snapsMu.Lock() + seq := db.getSeq() + elem := db.snapsRoot.prev + if elem == &db.snapsRoot || elem.seq != seq { + at := db.snapsRoot.prev + next := at.next + elem = &snapshotElement{ + seq: seq, + prev: at, + next: next, + } + at.next = elem + next.prev = elem + } + elem.ref++ + db.snapsMu.Unlock() + return elem +} + +// Releases given snapshot element. +func (db *DB) releaseSnapshot(elem *snapshotElement) { + if !db.isClosed() { + db.snapsMu.Lock() + elem.ref-- + if elem.ref == 0 { + elem.prev.next = elem.next + elem.next.prev = elem.prev + elem.next = nil + elem.prev = nil + } else if elem.ref < 0 { + panic("leveldb: Snapshot: negative element reference") + } + db.snapsMu.Unlock() + } +} + +// Gets minimum sequence that not being snapshoted. 
+func (db *DB) minSeq() uint64 { + db.snapsMu.Lock() + defer db.snapsMu.Unlock() + elem := db.snapsRoot.prev + if elem != &db.snapsRoot { + return elem.seq + } + return db.getSeq() +} + +// Snapshot is a DB snapshot. +type Snapshot struct { + db *DB + elem *snapshotElement + mu sync.RWMutex + released bool +} + +// Creates new snapshot object. +func (db *DB) newSnapshot() *Snapshot { + snap := &Snapshot{ + db: db, + elem: db.acquireSnapshot(), + } + atomic.AddInt32(&db.aliveSnaps, 1) + runtime.SetFinalizer(snap, (*Snapshot).Release) + return snap +} + +// Get gets the value for the given key. It returns ErrNotFound if +// the DB does not contain the key. +// +// The caller should not modify the contents of the returned slice, but +// it is safe to modify the contents of the argument after Get returns. +func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { + err = snap.db.ok() + if err != nil { + return + } + snap.mu.RLock() + defer snap.mu.RUnlock() + if snap.released { + err = ErrSnapshotReleased + return + } + return snap.db.get(key, snap.elem.seq, ro) +} + +// NewIterator returns an iterator for the snapshot of the uderlying DB. +// The returned iterator is not goroutine-safe, but it is safe to use +// multiple iterators concurrently, with each in a dedicated goroutine. +// It is also safe to use an iterator concurrently with modifying its +// underlying DB. The resultant key/value pairs are guaranteed to be +// consistent. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// DB. And a nil Range.Limit is treated as a key after all keys in +// the DB. +// +// The iterator must be released after use, by calling Release method. +// Releasing the snapshot doesn't mean releasing the iterator too, the +// iterator would be still valid until released. +// +// Also read Iterator documentation of the leveldb/iterator package. +func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + if err := snap.db.ok(); err != nil { + return iterator.NewEmptyIterator(err) + } + snap.mu.Lock() + defer snap.mu.Unlock() + if snap.released { + return iterator.NewEmptyIterator(ErrSnapshotReleased) + } + // Since iterator already hold version ref, it doesn't need to + // hold snapshot ref. + return snap.db.newIterator(snap.elem.seq, slice, ro) +} + +// Release releases the snapshot. This will not release any returned +// iterators, the iterators would still be valid until released or the +// underlying DB is closed. +// +// Other methods should not be called after the snapshot has been released. +func (snap *Snapshot) Release() { + snap.mu.Lock() + defer snap.mu.Unlock() + + if !snap.released { + // Clear the finalizer. + runtime.SetFinalizer(snap, nil) + + snap.released = true + snap.db.releaseSnapshot(snap.elem) + atomic.AddInt32(&snap.db.aliveSnaps, -1) + snap.db = nil + snap.elem = nil + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go new file mode 100644 index 000000000..7dadef81e --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go @@ -0,0 +1,202 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
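A short sketch of the snapshot API documented in db_snapshot.go above, assuming the vendored import path; the path and keys are illustrative.

    package main

    import (
        "fmt"
        "log"

        "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb"
    )

    func main() {
        db, err := leveldb.OpenFile("/tmp/example-db", nil) // illustrative path
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        if err := db.Put([]byte("k"), []byte("v1"), nil); err != nil {
            log.Fatal(err)
        }

        // The snapshot pins the current sequence number; later writes are not
        // visible through it. It must be released after use.
        snap, err := db.GetSnapshot()
        if err != nil {
            log.Fatal(err)
        }
        defer snap.Release()

        if err := db.Put([]byte("k"), []byte("v2"), nil); err != nil {
            log.Fatal(err)
        }

        old, _ := snap.Get([]byte("k"), nil) // sees "v1"
        cur, _ := db.Get([]byte("k"), nil)   // sees "v2"
        fmt.Printf("snapshot=%s current=%s\n", old, cur)
    }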
+ +package leveldb + +import ( + "sync/atomic" + "time" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" +) + +type memDB struct { + db *DB + mdb *memdb.DB + ref int32 +} + +func (m *memDB) incref() { + atomic.AddInt32(&m.ref, 1) +} + +func (m *memDB) decref() { + if ref := atomic.AddInt32(&m.ref, -1); ref == 0 { + // Only put back memdb with std capacity. + if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() { + m.mdb.Reset() + m.db.mpoolPut(m.mdb) + } + m.db = nil + m.mdb = nil + } else if ref < 0 { + panic("negative memdb ref") + } +} + +// Get latest sequence number. +func (db *DB) getSeq() uint64 { + return atomic.LoadUint64(&db.seq) +} + +// Atomically adds delta to seq. +func (db *DB) addSeq(delta uint64) { + atomic.AddUint64(&db.seq, delta) +} + +func (db *DB) mpoolPut(mem *memdb.DB) { + defer func() { + recover() + }() + select { + case db.memPool <- mem: + default: + } +} + +func (db *DB) mpoolGet() *memdb.DB { + select { + case mem := <-db.memPool: + return mem + default: + return nil + } +} + +func (db *DB) mpoolDrain() { + ticker := time.NewTicker(30 * time.Second) + for { + select { + case <-ticker.C: + select { + case <-db.memPool: + default: + } + case _, _ = <-db.closeC: + close(db.memPool) + return + } + } +} + +// Create new memdb and froze the old one; need external synchronization. +// newMem only called synchronously by the writer. +func (db *DB) newMem(n int) (mem *memDB, err error) { + num := db.s.allocFileNum() + file := db.s.getJournalFile(num) + w, err := file.Create() + if err != nil { + db.s.reuseFileNum(num) + return + } + + db.memMu.Lock() + defer db.memMu.Unlock() + + if db.frozenMem != nil { + panic("still has frozen mem") + } + + if db.journal == nil { + db.journal = journal.NewWriter(w) + } else { + db.journal.Reset(w) + db.journalWriter.Close() + db.frozenJournalFile = db.journalFile + } + db.journalWriter = w + db.journalFile = file + db.frozenMem = db.mem + mdb := db.mpoolGet() + if mdb == nil || mdb.Capacity() < n { + mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) + } + mem = &memDB{ + db: db, + mdb: mdb, + ref: 2, + } + db.mem = mem + // The seq only incremented by the writer. And whoever called newMem + // should hold write lock, so no need additional synchronization here. + db.frozenSeq = db.seq + return +} + +// Get all memdbs. +func (db *DB) getMems() (e, f *memDB) { + db.memMu.RLock() + defer db.memMu.RUnlock() + if db.mem == nil { + panic("nil effective mem") + } + db.mem.incref() + if db.frozenMem != nil { + db.frozenMem.incref() + } + return db.mem, db.frozenMem +} + +// Get frozen memdb. +func (db *DB) getEffectiveMem() *memDB { + db.memMu.RLock() + defer db.memMu.RUnlock() + if db.mem == nil { + panic("nil effective mem") + } + db.mem.incref() + return db.mem +} + +// Check whether we has frozen memdb. +func (db *DB) hasFrozenMem() bool { + db.memMu.RLock() + defer db.memMu.RUnlock() + return db.frozenMem != nil +} + +// Get frozen memdb. +func (db *DB) getFrozenMem() *memDB { + db.memMu.RLock() + defer db.memMu.RUnlock() + if db.frozenMem != nil { + db.frozenMem.incref() + } + return db.frozenMem +} + +// Drop frozen memdb; assume that frozen memdb isn't nil. 
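// newMem (above) freezes the active memdb and switches the writer to a fresh
// journal; the memory-compaction goroutine flushes the frozen memdb to a table
// file and then calls dropFrozenMem to discard it together with its
// now-obsolete journal file.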
+func (db *DB) dropFrozenMem() { + db.memMu.Lock() + if err := db.frozenJournalFile.Remove(); err != nil { + db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err) + } else { + db.logf("journal@remove removed @%d", db.frozenJournalFile.Num()) + } + db.frozenJournalFile = nil + db.frozenMem.decref() + db.frozenMem = nil + db.memMu.Unlock() +} + +// Set closed flag; return true if not already closed. +func (db *DB) setClosed() bool { + return atomic.CompareAndSwapUint32(&db.closed, 0, 1) +} + +// Check whether DB was closed. +func (db *DB) isClosed() bool { + return atomic.LoadUint32(&db.closed) != 0 +} + +// Check read ok status. +func (db *DB) ok() error { + if db.isClosed() { + return ErrClosed + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go new file mode 100644 index 000000000..02f69bf1a --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go @@ -0,0 +1,1890 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "math/rand" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + "unsafe" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +func tkey(i int) []byte { + return []byte(fmt.Sprintf("%016d", i)) +} + +func tval(seed, n int) []byte { + r := rand.New(rand.NewSource(int64(seed))) + return randomString(r, n) +} + +type dbHarness struct { + t *testing.T + + stor *testStorage + db *DB + o *opt.Options + ro *opt.ReadOptions + wo *opt.WriteOptions +} + +func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness { + h := new(dbHarness) + h.init(t, o) + return h +} + +func newDbHarness(t *testing.T) *dbHarness { + return newDbHarnessWopt(t, &opt.Options{}) +} + +func (h *dbHarness) init(t *testing.T, o *opt.Options) { + h.t = t + h.stor = newTestStorage(t) + h.o = o + h.ro = nil + h.wo = nil + + if err := h.openDB0(); err != nil { + // So that it will come after fatal message. 
+ defer h.stor.Close() + h.t.Fatal("Open (init): got error: ", err) + } +} + +func (h *dbHarness) openDB0() (err error) { + h.t.Log("opening DB") + h.db, err = Open(h.stor, h.o) + return +} + +func (h *dbHarness) openDB() { + if err := h.openDB0(); err != nil { + h.t.Fatal("Open: got error: ", err) + } +} + +func (h *dbHarness) closeDB0() error { + h.t.Log("closing DB") + return h.db.Close() +} + +func (h *dbHarness) closeDB() { + if err := h.closeDB0(); err != nil { + h.t.Error("Close: got error: ", err) + } + h.stor.CloseCheck() + runtime.GC() +} + +func (h *dbHarness) reopenDB() { + h.closeDB() + h.openDB() +} + +func (h *dbHarness) close() { + h.closeDB0() + h.db = nil + h.stor.Close() + h.stor = nil + runtime.GC() +} + +func (h *dbHarness) openAssert(want bool) { + db, err := Open(h.stor, h.o) + if err != nil { + if want { + h.t.Error("Open: assert: got error: ", err) + } else { + h.t.Log("Open: assert: got error (expected): ", err) + } + } else { + if !want { + h.t.Error("Open: assert: expect error") + } + db.Close() + } +} + +func (h *dbHarness) write(batch *Batch) { + if err := h.db.Write(batch, h.wo); err != nil { + h.t.Error("Write: got error: ", err) + } +} + +func (h *dbHarness) put(key, value string) { + if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil { + h.t.Error("Put: got error: ", err) + } +} + +func (h *dbHarness) putMulti(n int, low, hi string) { + for i := 0; i < n; i++ { + h.put(low, "begin") + h.put(hi, "end") + h.compactMem() + } +} + +func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) { + t := h.t + db := h.db + + var res uint64 + v := db.s.version() + for i, tt := range v.tables[1 : len(v.tables)-1] { + level := i + 1 + next := v.tables[level+1] + for _, t := range tt { + r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false) + sum := r.size() + if sum > res { + res = sum + } + } + } + v.release() + + if res > want { + t.Errorf("next level overlapping bytes is more than %d, got=%d", want, res) + } +} + +func (h *dbHarness) delete(key string) { + t := h.t + db := h.db + + err := db.Delete([]byte(key), h.wo) + if err != nil { + t.Error("Delete: got error: ", err) + } +} + +func (h *dbHarness) assertNumKeys(want int) { + iter := h.db.NewIterator(nil, h.ro) + defer iter.Release() + got := 0 + for iter.Next() { + got++ + } + if err := iter.Error(); err != nil { + h.t.Error("assertNumKeys: ", err) + } + if want != got { + h.t.Errorf("assertNumKeys: want=%d got=%d", want, got) + } +} + +func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) { + t := h.t + v, err := db.Get([]byte(key), h.ro) + switch err { + case ErrNotFound: + if expectFound { + t.Errorf("Get: key '%s' not found, want found", key) + } + case nil: + found = true + if !expectFound { + t.Errorf("Get: key '%s' found, want not found", key) + } + default: + t.Error("Get: got error: ", err) + } + return +} + +func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) { + return h.getr(h.db, key, expectFound) +} + +func (h *dbHarness) getValr(db Reader, key, value string) { + t := h.t + found, r := h.getr(db, key, true) + if !found { + return + } + rval := string(r) + if rval != value { + t.Errorf("Get: invalid value, got '%s', want '%s'", rval, value) + } +} + +func (h *dbHarness) getVal(key, value string) { + h.getValr(h.db, key, value) +} + +func (h *dbHarness) allEntriesFor(key, want string) { + t := h.t + db := h.db + s := db.s + + ikey := newIKey([]byte(key), kMaxSeq, tVal) + iter := db.newRawIterator(nil, 
nil) + if !iter.Seek(ikey) && iter.Error() != nil { + t.Error("AllEntries: error during seek, err: ", iter.Error()) + return + } + res := "[ " + first := true + for iter.Valid() { + rkey := iKey(iter.Key()) + if _, t, ok := rkey.parseNum(); ok { + if s.icmp.uCompare(ikey.ukey(), rkey.ukey()) != 0 { + break + } + if !first { + res += ", " + } + first = false + switch t { + case tVal: + res += string(iter.Value()) + case tDel: + res += "DEL" + } + } else { + if !first { + res += ", " + } + first = false + res += "CORRUPTED" + } + iter.Next() + } + if !first { + res += " " + } + res += "]" + if res != want { + t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want) + } +} + +// Return a string that contains all key,value pairs in order, +// formatted like "(k1->v1)(k2->v2)". +func (h *dbHarness) getKeyVal(want string) { + t := h.t + db := h.db + + s, err := db.GetSnapshot() + if err != nil { + t.Fatal("GetSnapshot: got error: ", err) + } + res := "" + iter := s.NewIterator(nil, nil) + for iter.Next() { + res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value())) + } + iter.Release() + + if res != want { + t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want) + } + s.Release() +} + +func (h *dbHarness) waitCompaction() { + t := h.t + db := h.db + if err := db.compSendIdle(db.tcompCmdC); err != nil { + t.Error("compaction error: ", err) + } +} + +func (h *dbHarness) waitMemCompaction() { + t := h.t + db := h.db + + if err := db.compSendIdle(db.mcompCmdC); err != nil { + t.Error("compaction error: ", err) + } +} + +func (h *dbHarness) compactMem() { + t := h.t + db := h.db + + db.writeLockC <- struct{}{} + defer func() { + <-db.writeLockC + }() + + if _, err := db.rotateMem(0); err != nil { + t.Error("compaction error: ", err) + } + if err := db.compSendIdle(db.mcompCmdC); err != nil { + t.Error("compaction error: ", err) + } + + if h.totalTables() == 0 { + t.Error("zero tables after mem compaction") + } +} + +func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) { + t := h.t + db := h.db + + var _min, _max []byte + if min != "" { + _min = []byte(min) + } + if max != "" { + _max = []byte(max) + } + + if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil { + if wanterr { + t.Log("CompactRangeAt: got error (expected): ", err) + } else { + t.Error("CompactRangeAt: got error: ", err) + } + } else if wanterr { + t.Error("CompactRangeAt: expect error") + } +} + +func (h *dbHarness) compactRangeAt(level int, min, max string) { + h.compactRangeAtErr(level, min, max, false) +} + +func (h *dbHarness) compactRange(min, max string) { + t := h.t + db := h.db + + var r util.Range + if min != "" { + r.Start = []byte(min) + } + if max != "" { + r.Limit = []byte(max) + } + if err := db.CompactRange(r); err != nil { + t.Error("CompactRange: got error: ", err) + } +} + +func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) { + t := h.t + db := h.db + + s, err := db.SizeOf([]util.Range{ + {[]byte(start), []byte(limit)}, + }) + if err != nil { + t.Error("SizeOf: got error: ", err) + } + if s.Sum() < low || s.Sum() > hi { + t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d", + shorten(start), shorten(limit), low, hi, s.Sum()) + } +} + +func (h *dbHarness) getSnapshot() (s *Snapshot) { + s, err := h.db.GetSnapshot() + if err != nil { + h.t.Fatal("GetSnapshot: got error: ", err) + } + return +} +func (h *dbHarness) tablesPerLevel(want string) { + res := "" + nz := 0 + v := h.db.s.version() + 
for level, tt := range v.tables { + if level > 0 { + res += "," + } + res += fmt.Sprint(len(tt)) + if len(tt) > 0 { + nz = len(res) + } + } + v.release() + res = res[:nz] + if res != want { + h.t.Errorf("invalid tables len, want=%s, got=%s", want, res) + } +} + +func (h *dbHarness) totalTables() (n int) { + v := h.db.s.version() + for _, tt := range v.tables { + n += len(tt) + } + v.release() + return +} + +type keyValue interface { + Key() []byte + Value() []byte +} + +func testKeyVal(t *testing.T, kv keyValue, want string) { + res := string(kv.Key()) + "->" + string(kv.Value()) + if res != want { + t.Errorf("invalid key/value, want=%q, got=%q", want, res) + } +} + +func numKey(num int) string { + return fmt.Sprintf("key%06d", num) +} + +var _bloom_filter = filter.NewBloomFilter(10) + +func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) { + for i := 0; i < 4; i++ { + func() { + switch i { + case 0: + case 1: + if o == nil { + o = &opt.Options{Filter: _bloom_filter} + } else { + old := o + o = &opt.Options{} + *o = *old + o.Filter = _bloom_filter + } + case 2: + if o == nil { + o = &opt.Options{Compression: opt.NoCompression} + } else { + old := o + o = &opt.Options{} + *o = *old + o.Compression = opt.NoCompression + } + } + h := newDbHarnessWopt(t, o) + defer h.close() + switch i { + case 3: + h.reopenDB() + } + f(h) + }() + } +} + +func trun(t *testing.T, f func(h *dbHarness)) { + truno(t, nil, f) +} + +func testAligned(t *testing.T, name string, offset uintptr) { + if offset%8 != 0 { + t.Errorf("field %s offset is not 64-bit aligned", name) + } +} + +func Test_FieldsAligned(t *testing.T) { + p1 := new(DB) + testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq)) + p2 := new(session) + testAligned(t, "session.stFileNum", unsafe.Offsetof(p2.stFileNum)) + testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum)) + testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum)) + testAligned(t, "session.stSeq", unsafe.Offsetof(p2.stSeq)) +} + +func TestDb_Locking(t *testing.T) { + h := newDbHarness(t) + defer h.stor.Close() + h.openAssert(false) + h.closeDB() + h.openAssert(true) +} + +func TestDb_Empty(t *testing.T) { + trun(t, func(h *dbHarness) { + h.get("foo", false) + + h.reopenDB() + h.get("foo", false) + }) +} + +func TestDb_ReadWrite(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.getVal("foo", "v1") + h.put("bar", "v2") + h.put("foo", "v3") + h.getVal("foo", "v3") + h.getVal("bar", "v2") + + h.reopenDB() + h.getVal("foo", "v3") + h.getVal("bar", "v2") + }) +} + +func TestDb_PutDeleteGet(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.getVal("foo", "v1") + h.put("foo", "v2") + h.getVal("foo", "v2") + h.delete("foo") + h.get("foo", false) + + h.reopenDB() + h.get("foo", false) + }) +} + +func TestDb_EmptyBatch(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.get("foo", false) + err := h.db.Write(new(Batch), h.wo) + if err != nil { + t.Error("writing empty batch yield error: ", err) + } + h.get("foo", false) +} + +func TestDb_GetFromFrozen(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100}) + defer h.close() + + h.put("foo", "v1") + h.getVal("foo", "v1") + + h.stor.DelaySync(storage.TypeTable) // Block sync calls + h.put("k1", strings.Repeat("x", 100000)) // Fill memtable + h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction + for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ { + time.Sleep(10 * time.Microsecond) + } + if h.db.getFrozenMem() == 
nil { + h.stor.ReleaseSync(storage.TypeTable) + t.Fatal("No frozen mem") + } + h.getVal("foo", "v1") + h.stor.ReleaseSync(storage.TypeTable) // Release sync calls + + h.reopenDB() + h.getVal("foo", "v1") + h.get("k1", true) + h.get("k2", true) +} + +func TestDb_GetFromTable(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.compactMem() + h.getVal("foo", "v1") + }) +} + +func TestDb_GetSnapshot(t *testing.T) { + trun(t, func(h *dbHarness) { + bar := strings.Repeat("b", 200) + h.put("foo", "v1") + h.put(bar, "v1") + + snap, err := h.db.GetSnapshot() + if err != nil { + t.Fatal("GetSnapshot: got error: ", err) + } + + h.put("foo", "v2") + h.put(bar, "v2") + + h.getVal("foo", "v2") + h.getVal(bar, "v2") + h.getValr(snap, "foo", "v1") + h.getValr(snap, bar, "v1") + + h.compactMem() + + h.getVal("foo", "v2") + h.getVal(bar, "v2") + h.getValr(snap, "foo", "v1") + h.getValr(snap, bar, "v1") + + snap.Release() + + h.reopenDB() + h.getVal("foo", "v2") + h.getVal(bar, "v2") + }) +} + +func TestDb_GetLevel0Ordering(t *testing.T) { + trun(t, func(h *dbHarness) { + for i := 0; i < 4; i++ { + h.put("bar", fmt.Sprintf("b%d", i)) + h.put("foo", fmt.Sprintf("v%d", i)) + h.compactMem() + } + h.getVal("foo", "v3") + h.getVal("bar", "b3") + + v := h.db.s.version() + t0len := v.tLen(0) + v.release() + if t0len < 2 { + t.Errorf("level-0 tables is less than 2, got %d", t0len) + } + + h.reopenDB() + h.getVal("foo", "v3") + h.getVal("bar", "b3") + }) +} + +func TestDb_GetOrderedByLevels(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.compactMem() + h.compactRange("a", "z") + h.getVal("foo", "v1") + h.put("foo", "v2") + h.compactMem() + h.getVal("foo", "v2") + }) +} + +func TestDb_GetPicksCorrectFile(t *testing.T) { + trun(t, func(h *dbHarness) { + // Arrange to have multiple files in a non-level-0 level. + h.put("a", "va") + h.compactMem() + h.compactRange("a", "b") + h.put("x", "vx") + h.compactMem() + h.compactRange("x", "y") + h.put("f", "vf") + h.compactMem() + h.compactRange("f", "g") + + h.getVal("a", "va") + h.getVal("f", "vf") + h.getVal("x", "vx") + + h.compactRange("", "") + h.getVal("a", "va") + h.getVal("f", "vf") + h.getVal("x", "vx") + }) +} + +func TestDb_GetEncountersEmptyLevel(t *testing.T) { + trun(t, func(h *dbHarness) { + // Arrange for the following to happen: + // * sstable A in level 0 + // * nothing in level 1 + // * sstable B in level 2 + // Then do enough Get() calls to arrange for an automatic compaction + // of sstable A. A bug would cause the compaction to be marked as + // occuring at level 1 (instead of the correct level 0). + + // Step 1: First place sstables in levels 0 and 2 + for i := 0; ; i++ { + if i >= 100 { + t.Fatal("could not fill levels-0 and level-2") + } + v := h.db.s.version() + if v.tLen(0) > 0 && v.tLen(2) > 0 { + v.release() + break + } + v.release() + h.put("a", "begin") + h.put("z", "end") + h.compactMem() + + h.getVal("a", "begin") + h.getVal("z", "end") + } + + // Step 2: clear level 1 if necessary. 
+ h.compactRangeAt(1, "", "") + h.tablesPerLevel("1,0,1") + + h.getVal("a", "begin") + h.getVal("z", "end") + + // Step 3: read a bunch of times + for i := 0; i < 200; i++ { + h.get("missing", false) + } + + // Step 4: Wait for compaction to finish + h.waitCompaction() + + v := h.db.s.version() + if v.tLen(0) > 0 { + t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) + } + v.release() + + h.getVal("a", "begin") + h.getVal("z", "end") + }) +} + +func TestDb_IterMultiWithDelete(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("a", "va") + h.put("b", "vb") + h.put("c", "vc") + h.delete("b") + h.get("b", false) + + iter := h.db.NewIterator(nil, nil) + iter.Seek([]byte("c")) + testKeyVal(t, iter, "c->vc") + iter.Prev() + testKeyVal(t, iter, "a->va") + iter.Release() + + h.compactMem() + + iter = h.db.NewIterator(nil, nil) + iter.Seek([]byte("c")) + testKeyVal(t, iter, "c->vc") + iter.Prev() + testKeyVal(t, iter, "a->va") + iter.Release() + }) +} + +func TestDb_IteratorPinsRef(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.put("foo", "hello") + + // Get iterator that will yield the current contents of the DB. + iter := h.db.NewIterator(nil, nil) + + // Write to force compactions + h.put("foo", "newvalue1") + for i := 0; i < 100; i++ { + h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) + } + h.put("foo", "newvalue2") + + iter.First() + testKeyVal(t, iter, "foo->hello") + if iter.Next() { + t.Errorf("expect eof") + } + iter.Release() +} + +func TestDb_Recover(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.put("baz", "v5") + + h.reopenDB() + h.getVal("foo", "v1") + + h.getVal("foo", "v1") + h.getVal("baz", "v5") + h.put("bar", "v2") + h.put("foo", "v3") + + h.reopenDB() + h.getVal("foo", "v3") + h.put("foo", "v4") + h.getVal("foo", "v4") + h.getVal("bar", "v2") + h.getVal("baz", "v5") + }) +} + +func TestDb_RecoverWithEmptyJournal(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.put("foo", "v2") + + h.reopenDB() + h.reopenDB() + h.put("foo", "v3") + + h.reopenDB() + h.getVal("foo", "v3") + }) +} + +func TestDb_RecoverDuringMemtableCompaction(t *testing.T) { + truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) { + + h.stor.DelaySync(storage.TypeTable) + h.put("big1", strings.Repeat("x", 10000000)) + h.put("big2", strings.Repeat("y", 1000)) + h.put("bar", "v2") + h.stor.ReleaseSync(storage.TypeTable) + + h.reopenDB() + h.getVal("bar", "v2") + h.getVal("big1", strings.Repeat("x", 10000000)) + h.getVal("big2", strings.Repeat("y", 1000)) + }) +} + +func TestDb_MinorCompactionsHappen(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000}) + defer h.close() + + n := 500 + + key := func(i int) string { + return fmt.Sprintf("key%06d", i) + } + + for i := 0; i < n; i++ { + h.put(key(i), key(i)+strings.Repeat("v", 1000)) + } + + for i := 0; i < n; i++ { + h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) + } + + h.reopenDB() + for i := 0; i < n; i++ { + h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) + } +} + +func TestDb_RecoverWithLargeJournal(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.put("big1", strings.Repeat("1", 200000)) + h.put("big2", strings.Repeat("2", 200000)) + h.put("small3", strings.Repeat("3", 10)) + h.put("small4", strings.Repeat("4", 10)) + h.tablesPerLevel("") + + // Make sure that if we re-open with a small write buffer size that + // we flush table files in the middle of a large journal file. 
+ h.o.WriteBuffer = 100000 + h.reopenDB() + h.getVal("big1", strings.Repeat("1", 200000)) + h.getVal("big2", strings.Repeat("2", 200000)) + h.getVal("small3", strings.Repeat("3", 10)) + h.getVal("small4", strings.Repeat("4", 10)) + v := h.db.s.version() + if v.tLen(0) <= 1 { + t.Errorf("tables-0 less than one") + } + v.release() +} + +func TestDb_CompactionsGenerateMultipleFiles(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + WriteBuffer: 10000000, + Compression: opt.NoCompression, + }) + defer h.close() + + v := h.db.s.version() + if v.tLen(0) > 0 { + t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) + } + v.release() + + n := 80 + + // Write 8MB (80 values, each 100K) + for i := 0; i < n; i++ { + h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) + } + + // Reopening moves updates to level-0 + h.reopenDB() + h.compactRangeAt(0, "", "") + + v = h.db.s.version() + if v.tLen(0) > 0 { + t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) + } + if v.tLen(1) <= 1 { + t.Errorf("level-1 tables less than 1, got %d", v.tLen(1)) + } + v.release() + + for i := 0; i < n; i++ { + h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) + } +} + +func TestDb_RepeatedWritesToSameKey(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) + defer h.close() + + maxTables := kNumLevels + kL0_StopWritesTrigger + + value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) + for i := 0; i < 5*maxTables; i++ { + h.put("key", value) + n := h.totalTables() + if n > maxTables { + t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) + } + } +} + +func TestDb_RepeatedWritesToSameKeyAfterReopen(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) + defer h.close() + + h.reopenDB() + + maxTables := kNumLevels + kL0_StopWritesTrigger + + value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) + for i := 0; i < 5*maxTables; i++ { + h.put("key", value) + n := h.totalTables() + if n > maxTables { + t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) + } + } +} + +func TestDb_SparseMerge(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) + defer h.close() + + h.putMulti(kNumLevels, "A", "Z") + + // Suppose there is: + // small amount of data with prefix A + // large amount of data with prefix B + // small amount of data with prefix C + // and that recent updates have made small changes to all three prefixes. + // Check that we do not do a compaction that merges all of B in one shot. 
+ h.put("A", "va") + value := strings.Repeat("x", 1000) + for i := 0; i < 100000; i++ { + h.put(fmt.Sprintf("B%010d", i), value) + } + h.put("C", "vc") + h.compactMem() + h.compactRangeAt(0, "", "") + h.waitCompaction() + + // Make sparse update + h.put("A", "va2") + h.put("B100", "bvalue2") + h.put("C", "vc2") + h.compactMem() + + h.maxNextLevelOverlappingBytes(20 * 1048576) + h.compactRangeAt(0, "", "") + h.waitCompaction() + h.maxNextLevelOverlappingBytes(20 * 1048576) + h.compactRangeAt(1, "", "") + h.waitCompaction() + h.maxNextLevelOverlappingBytes(20 * 1048576) +} + +func TestDb_SizeOf(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + Compression: opt.NoCompression, + WriteBuffer: 10000000, + }) + defer h.close() + + h.sizeAssert("", "xyz", 0, 0) + h.reopenDB() + h.sizeAssert("", "xyz", 0, 0) + + // Write 8MB (80 values, each 100K) + n := 80 + s1 := 100000 + s2 := 105000 + + for i := 0; i < n; i++ { + h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10)) + } + + // 0 because SizeOf() does not account for memtable space + h.sizeAssert("", numKey(50), 0, 0) + + for r := 0; r < 3; r++ { + h.reopenDB() + + for cs := 0; cs < n; cs += 10 { + for i := 0; i < n; i += 10 { + h.sizeAssert("", numKey(i), uint64(s1*i), uint64(s2*i)) + h.sizeAssert("", numKey(i)+".suffix", uint64(s1*(i+1)), uint64(s2*(i+1))) + h.sizeAssert(numKey(i), numKey(i+10), uint64(s1*10), uint64(s2*10)) + } + + h.sizeAssert("", numKey(50), uint64(s1*50), uint64(s2*50)) + h.sizeAssert("", numKey(50)+".suffix", uint64(s1*50), uint64(s2*50)) + + h.compactRangeAt(0, numKey(cs), numKey(cs+9)) + } + + v := h.db.s.version() + if v.tLen(0) != 0 { + t.Errorf("level-0 tables was not zero, got %d", v.tLen(0)) + } + if v.tLen(1) == 0 { + t.Error("level-1 tables was zero") + } + v.release() + } +} + +func TestDb_SizeOf_MixOfSmallAndLarge(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) + defer h.close() + + sizes := []uint64{ + 10000, + 10000, + 100000, + 10000, + 100000, + 10000, + 300000, + 10000, + } + + for i, n := range sizes { + h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10)) + } + + for r := 0; r < 3; r++ { + h.reopenDB() + + var x uint64 + for i, n := range sizes { + y := x + if i > 0 { + y += 1000 + } + h.sizeAssert("", numKey(i), x, y) + x += n + } + + h.sizeAssert(numKey(3), numKey(5), 110000, 111000) + + h.compactRangeAt(0, "", "") + } +} + +func TestDb_Snapshot(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + s1 := h.getSnapshot() + h.put("foo", "v2") + s2 := h.getSnapshot() + h.put("foo", "v3") + s3 := h.getSnapshot() + h.put("foo", "v4") + + h.getValr(s1, "foo", "v1") + h.getValr(s2, "foo", "v2") + h.getValr(s3, "foo", "v3") + h.getVal("foo", "v4") + + s3.Release() + h.getValr(s1, "foo", "v1") + h.getValr(s2, "foo", "v2") + h.getVal("foo", "v4") + + s1.Release() + h.getValr(s2, "foo", "v2") + h.getVal("foo", "v4") + + s2.Release() + h.getVal("foo", "v4") + }) +} + +func TestDb_HiddenValuesAreRemoved(t *testing.T) { + trun(t, func(h *dbHarness) { + s := h.db.s + + h.put("foo", "v1") + h.compactMem() + m := kMaxMemCompactLevel + v := s.version() + num := v.tLen(m) + v.release() + if num != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m, num) + } + + // Place a table at level last-1 to prevent merging with preceding mutation + h.put("a", "begin") + h.put("z", "end") + h.compactMem() + v = s.version() + if v.tLen(m) != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) + } + if v.tLen(m-1) != 1 { + 
t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) + } + v.release() + + h.delete("foo") + h.put("foo", "v2") + h.allEntriesFor("foo", "[ v2, DEL, v1 ]") + h.compactMem() + h.allEntriesFor("foo", "[ v2, DEL, v1 ]") + h.compactRangeAt(m-2, "", "z") + // DEL eliminated, but v1 remains because we aren't compacting that level + // (DEL can be eliminated because v2 hides v1). + h.allEntriesFor("foo", "[ v2, v1 ]") + h.compactRangeAt(m-1, "", "") + // Merging last-1 w/ last, so we are the base level for "foo", so + // DEL is removed. (as is v1). + h.allEntriesFor("foo", "[ v2 ]") + }) +} + +func TestDb_DeletionMarkers2(t *testing.T) { + h := newDbHarness(t) + defer h.close() + s := h.db.s + + h.put("foo", "v1") + h.compactMem() + m := kMaxMemCompactLevel + v := s.version() + num := v.tLen(m) + v.release() + if num != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m, num) + } + + // Place a table at level last-1 to prevent merging with preceding mutation + h.put("a", "begin") + h.put("z", "end") + h.compactMem() + v = s.version() + if v.tLen(m) != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) + } + if v.tLen(m-1) != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) + } + v.release() + + h.delete("foo") + h.allEntriesFor("foo", "[ DEL, v1 ]") + h.compactMem() // Moves to level last-2 + h.allEntriesFor("foo", "[ DEL, v1 ]") + h.compactRangeAt(m-2, "", "") + // DEL kept: "last" file overlaps + h.allEntriesFor("foo", "[ DEL, v1 ]") + h.compactRangeAt(m-1, "", "") + // Merging last-1 w/ last, so we are the base level for "foo", so + // DEL is removed. (as is v1). + h.allEntriesFor("foo", "[ ]") +} + +func TestDb_CompactionTableOpenError(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{CachedOpenFiles: -1}) + defer h.close() + + im := 10 + jm := 10 + for r := 0; r < 2; r++ { + for i := 0; i < im; i++ { + for j := 0; j < jm; j++ { + h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) + } + h.compactMem() + } + } + + if n := h.totalTables(); n != im*2 { + t.Errorf("total tables is %d, want %d", n, im) + } + + h.stor.SetOpenErr(storage.TypeTable) + go h.db.CompactRange(util.Range{}) + if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil { + t.Log("compaction error: ", err) + } + h.closeDB0() + h.openDB() + h.stor.SetOpenErr(0) + + for i := 0; i < im; i++ { + for j := 0; j < jm; j++ { + h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) + } + } +} + +func TestDb_OverlapInLevel0(t *testing.T) { + trun(t, func(h *dbHarness) { + if kMaxMemCompactLevel != 2 { + t.Fatal("fix test to reflect the config") + } + + // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0. + h.put("100", "v100") + h.put("999", "v999") + h.compactMem() + h.delete("100") + h.delete("999") + h.compactMem() + h.tablesPerLevel("0,1,1") + + // Make files spanning the following ranges in level-0: + // files[0] 200 .. 900 + // files[1] 300 .. 500 + // Note that files are sorted by min key. + h.put("300", "v300") + h.put("500", "v500") + h.compactMem() + h.put("200", "v200") + h.put("600", "v600") + h.put("900", "v900") + h.compactMem() + h.tablesPerLevel("2,1,1") + + // Compact away the placeholder files we created initially + h.compactRangeAt(1, "", "") + h.compactRangeAt(2, "", "") + h.tablesPerLevel("2") + + // Do a memtable compaction. Before bug-fix, the compaction would + // not detect the overlap with level-0 files and would incorrectly place + // the deletion in a deeper level. 
+ h.delete("600") + h.compactMem() + h.tablesPerLevel("3") + h.get("600", false) + }) +} + +func TestDb_L0_CompactionBug_Issue44_a(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.reopenDB() + h.put("b", "v") + h.reopenDB() + h.delete("b") + h.delete("a") + h.reopenDB() + h.delete("a") + h.reopenDB() + h.put("a", "v") + h.reopenDB() + h.reopenDB() + h.getKeyVal("(a->v)") + h.waitCompaction() + h.getKeyVal("(a->v)") +} + +func TestDb_L0_CompactionBug_Issue44_b(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.reopenDB() + h.put("", "") + h.reopenDB() + h.delete("e") + h.put("", "") + h.reopenDB() + h.put("c", "cv") + h.reopenDB() + h.put("", "") + h.reopenDB() + h.put("", "") + h.waitCompaction() + h.reopenDB() + h.put("d", "dv") + h.reopenDB() + h.put("", "") + h.reopenDB() + h.delete("d") + h.delete("b") + h.reopenDB() + h.getKeyVal("(->)(c->cv)") + h.waitCompaction() + h.getKeyVal("(->)(c->cv)") +} + +func TestDb_SingleEntryMemCompaction(t *testing.T) { + trun(t, func(h *dbHarness) { + for i := 0; i < 10; i++ { + h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer)) + h.compactMem() + h.put("key", strings.Repeat("v", opt.DefaultBlockSize)) + h.compactMem() + h.put("k", "v") + h.compactMem() + h.put("", "") + h.compactMem() + h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2)) + h.compactMem() + } + }) +} + +func TestDb_ManifestWriteError(t *testing.T) { + for i := 0; i < 2; i++ { + func() { + h := newDbHarness(t) + defer h.close() + + h.put("foo", "bar") + h.getVal("foo", "bar") + + // Mem compaction (will succeed) + h.compactMem() + h.getVal("foo", "bar") + v := h.db.s.version() + if n := v.tLen(kMaxMemCompactLevel); n != 1 { + t.Errorf("invalid total tables, want=1 got=%d", n) + } + v.release() + + if i == 0 { + h.stor.SetWriteErr(storage.TypeManifest) + } else { + h.stor.SetSyncErr(storage.TypeManifest) + } + + // Merging compaction (will fail) + h.compactRangeAtErr(kMaxMemCompactLevel, "", "", true) + + h.db.Close() + h.stor.SetWriteErr(0) + h.stor.SetSyncErr(0) + + // Should not lose data + h.openDB() + h.getVal("foo", "bar") + }() + } +} + +func assertErr(t *testing.T, err error, wanterr bool) { + if err != nil { + if wanterr { + t.Log("AssertErr: got error (expected): ", err) + } else { + t.Error("AssertErr: got error: ", err) + } + } else if wanterr { + t.Error("AssertErr: expect error") + } +} + +func TestDb_ClosedIsClosed(t *testing.T) { + h := newDbHarness(t) + db := h.db + + var iter, iter2 iterator.Iterator + var snap *Snapshot + func() { + defer h.close() + + h.put("k", "v") + h.getVal("k", "v") + + iter = db.NewIterator(nil, h.ro) + iter.Seek([]byte("k")) + testKeyVal(t, iter, "k->v") + + var err error + snap, err = db.GetSnapshot() + if err != nil { + t.Fatal("GetSnapshot: got error: ", err) + } + + h.getValr(snap, "k", "v") + + iter2 = snap.NewIterator(nil, h.ro) + iter2.Seek([]byte("k")) + testKeyVal(t, iter2, "k->v") + + h.put("foo", "v2") + h.delete("foo") + + // closing DB + iter.Release() + iter2.Release() + }() + + assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true) + _, err := db.Get([]byte("k"), h.ro) + assertErr(t, err, true) + + if iter.Valid() { + t.Errorf("iter.Valid should false") + } + assertErr(t, iter.Error(), false) + testKeyVal(t, iter, "->") + if iter.Seek([]byte("k")) { + t.Errorf("iter.Seek should false") + } + assertErr(t, iter.Error(), true) + + assertErr(t, iter2.Error(), false) + + _, err = snap.Get([]byte("k"), h.ro) + assertErr(t, err, true) + + _, err = db.GetSnapshot() + assertErr(t, err, 
true) + + iter3 := db.NewIterator(nil, h.ro) + assertErr(t, iter3.Error(), true) + + iter3 = snap.NewIterator(nil, h.ro) + assertErr(t, iter3.Error(), true) + + assertErr(t, db.Delete([]byte("k"), h.wo), true) + + _, err = db.GetProperty("leveldb.stats") + assertErr(t, err, true) + + _, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}}) + assertErr(t, err, true) + + assertErr(t, db.CompactRange(util.Range{}), true) + + assertErr(t, db.Close(), true) +} + +type numberComparer struct{} + +func (numberComparer) num(x []byte) (n int) { + fmt.Sscan(string(x[1:len(x)-1]), &n) + return +} + +func (numberComparer) Name() string { + return "test.NumberComparer" +} + +func (p numberComparer) Compare(a, b []byte) int { + return p.num(a) - p.num(b) +} + +func (numberComparer) Separator(dst, a, b []byte) []byte { return nil } +func (numberComparer) Successor(dst, b []byte) []byte { return nil } + +func TestDb_CustomComparer(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + Comparer: numberComparer{}, + WriteBuffer: 1000, + }) + defer h.close() + + h.put("[10]", "ten") + h.put("[0x14]", "twenty") + for i := 0; i < 2; i++ { + h.getVal("[10]", "ten") + h.getVal("[0xa]", "ten") + h.getVal("[20]", "twenty") + h.getVal("[0x14]", "twenty") + h.get("[15]", false) + h.get("[0xf]", false) + h.compactMem() + h.compactRange("[0]", "[9999]") + } + + for n := 0; n < 2; n++ { + for i := 0; i < 100; i++ { + v := fmt.Sprintf("[%d]", i*10) + h.put(v, v) + } + h.compactMem() + h.compactRange("[0]", "[1000000]") + } +} + +func TestDb_ManualCompaction(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + if kMaxMemCompactLevel != 2 { + t.Fatal("fix test to reflect the config") + } + + h.putMulti(3, "p", "q") + h.tablesPerLevel("1,1,1") + + // Compaction range falls before files + h.compactRange("", "c") + h.tablesPerLevel("1,1,1") + + // Compaction range falls after files + h.compactRange("r", "z") + h.tablesPerLevel("1,1,1") + + // Compaction range overlaps files + h.compactRange("p1", "p9") + h.tablesPerLevel("0,0,1") + + // Populate a different range + h.putMulti(3, "c", "e") + h.tablesPerLevel("1,1,2") + + // Compact just the new range + h.compactRange("b", "f") + h.tablesPerLevel("0,0,2") + + // Compact all + h.putMulti(1, "a", "z") + h.tablesPerLevel("0,1,2") + h.compactRange("", "") + h.tablesPerLevel("0,0,1") +} + +func TestDb_BloomFilter(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + BlockCache: opt.NoCache, + Filter: filter.NewBloomFilter(10), + }) + defer h.close() + + key := func(i int) string { + return fmt.Sprintf("key%06d", i) + } + + const ( + n = 10000 + indexOverheat = 19898 + filterOverheat = 19799 + ) + + // Populate multiple layers + for i := 0; i < n; i++ { + h.put(key(i), key(i)) + } + h.compactMem() + h.compactRange("a", "z") + for i := 0; i < n; i += 100 { + h.put(key(i), key(i)) + } + h.compactMem() + + // Prevent auto compactions triggered by seeks + h.stor.DelaySync(storage.TypeTable) + + // Lookup present keys. Should rarely read from small sstable. + h.stor.SetReadCounter(storage.TypeTable) + for i := 0; i < n; i++ { + h.getVal(key(i), key(i)) + } + cnt := int(h.stor.ReadCounter()) + t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt) + + if min, max := n+indexOverheat+filterOverheat, n+indexOverheat+filterOverheat+2*n/100; cnt < min || cnt > max { + t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt) + } + + // Lookup missing keys. Should rarely read from either sstable. 
+ h.stor.ResetReadCounter() + for i := 0; i < n; i++ { + h.get(key(i)+".missing", false) + } + cnt = int(h.stor.ReadCounter()) + t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt) + if max := 3*n/100 + indexOverheat + filterOverheat; cnt > max { + t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt) + } + + h.stor.ReleaseSync(storage.TypeTable) +} + +func TestDb_Concurrent(t *testing.T) { + const n, secs, maxkey = 4, 2, 1000 + + runtime.GOMAXPROCS(n) + trun(t, func(h *dbHarness) { + var closeWg sync.WaitGroup + var stop uint32 + var cnt [n]uint32 + + for i := 0; i < n; i++ { + closeWg.Add(1) + go func(i int) { + var put, get, found uint + defer func() { + t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d", + i, cnt[i], put, get, found, get-found) + closeWg.Done() + }() + + rnd := rand.New(rand.NewSource(int64(1000 + i))) + for atomic.LoadUint32(&stop) == 0 { + x := cnt[i] + + k := rnd.Intn(maxkey) + kstr := fmt.Sprintf("%016d", k) + + if (rnd.Int() % 2) > 0 { + put++ + h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x)) + } else { + get++ + v, err := h.db.Get([]byte(kstr), h.ro) + if err == nil { + found++ + rk, ri, rx := 0, -1, uint32(0) + fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx) + if rk != k { + t.Errorf("invalid key want=%d got=%d", k, rk) + } + if ri < 0 || ri >= n { + t.Error("invalid goroutine number: ", ri) + } else { + tx := atomic.LoadUint32(&(cnt[ri])) + if rx > tx { + t.Errorf("invalid seq number, %d > %d ", rx, tx) + } + } + } else if err != ErrNotFound { + t.Error("Get: got error: ", err) + return + } + } + atomic.AddUint32(&cnt[i], 1) + } + }(i) + } + + time.Sleep(secs * time.Second) + atomic.StoreUint32(&stop, 1) + closeWg.Wait() + }) + + runtime.GOMAXPROCS(1) +} + +func TestDb_Concurrent2(t *testing.T) { + const n, n2 = 4, 4000 + + runtime.GOMAXPROCS(n*2 + 2) + truno(t, &opt.Options{WriteBuffer: 30}, func(h *dbHarness) { + var closeWg sync.WaitGroup + var stop uint32 + + for i := 0; i < n; i++ { + closeWg.Add(1) + go func(i int) { + for k := 0; atomic.LoadUint32(&stop) == 0; k++ { + h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) + } + closeWg.Done() + }(i) + } + + for i := 0; i < n; i++ { + closeWg.Add(1) + go func(i int) { + for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- { + h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) + } + closeWg.Done() + }(i) + } + + cmp := comparer.DefaultComparer + for i := 0; i < n2; i++ { + closeWg.Add(1) + go func(i int) { + it := h.db.NewIterator(nil, nil) + var pk []byte + for it.Next() { + kk := it.Key() + if cmp.Compare(kk, pk) <= 0 { + t.Errorf("iter %d: %q is successor of %q", i, pk, kk) + } + pk = append(pk[:0], kk...) 
+ var k, vk, vi int + if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil { + t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err) + } else if n < 1 { + t.Errorf("iter %d: Cannot parse key %q", i, it.Key()) + } + if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil { + t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err) + } else if n < 2 { + t.Errorf("iter %d: Cannot parse value %q", i, it.Value()) + } + + if vk != k { + t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk) + } + } + if err := it.Error(); err != nil { + t.Errorf("iter %d: Got error: %v", i, err) + } + it.Release() + closeWg.Done() + }(i) + } + + atomic.StoreUint32(&stop, 1) + closeWg.Wait() + }) + + runtime.GOMAXPROCS(1) +} + +func TestDb_CreateReopenDbOnFile(t *testing.T) { + dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid())) + if err := os.RemoveAll(dbpath); err != nil { + t.Fatal("cannot remove old db: ", err) + } + defer os.RemoveAll(dbpath) + + for i := 0; i < 3; i++ { + stor, err := storage.OpenFile(dbpath) + if err != nil { + t.Fatalf("(%d) cannot open storage: %s", i, err) + } + db, err := Open(stor, nil) + if err != nil { + t.Fatalf("(%d) cannot open db: %s", i, err) + } + if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { + t.Fatalf("(%d) cannot write to db: %s", i, err) + } + if err := db.Close(); err != nil { + t.Fatalf("(%d) cannot close db: %s", i, err) + } + if err := stor.Close(); err != nil { + t.Fatalf("(%d) cannot close storage: %s", i, err) + } + } +} + +func TestDb_CreateReopenDbOnFile2(t *testing.T) { + dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid())) + if err := os.RemoveAll(dbpath); err != nil { + t.Fatal("cannot remove old db: ", err) + } + defer os.RemoveAll(dbpath) + + for i := 0; i < 3; i++ { + db, err := OpenFile(dbpath, nil) + if err != nil { + t.Fatalf("(%d) cannot open db: %s", i, err) + } + if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { + t.Fatalf("(%d) cannot write to db: %s", i, err) + } + if err := db.Close(); err != nil { + t.Fatalf("(%d) cannot close db: %s", i, err) + } + } +} + +func TestDb_DeletionMarkersOnMemdb(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.put("foo", "v1") + h.compactMem() + h.delete("foo") + h.get("foo", false) + h.getKeyVal("") +} + +func TestDb_LeveldbIssue178(t *testing.T) { + nKeys := (kMaxTableSize / 30) * 5 + key1 := func(i int) string { + return fmt.Sprintf("my_key_%d", i) + } + key2 := func(i int) string { + return fmt.Sprintf("my_key_%d_xxx", i) + } + + // Disable compression since it affects the creation of layers and the + // code below is trying to test against a very specific scenario. + h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) + defer h.close() + + // Create first key range. + batch := new(Batch) + for i := 0; i < nKeys; i++ { + batch.Put([]byte(key1(i)), []byte("value for range 1 key")) + } + h.write(batch) + + // Create second key range. + batch.Reset() + for i := 0; i < nKeys; i++ { + batch.Put([]byte(key2(i)), []byte("value for range 2 key")) + } + h.write(batch) + + // Delete second key range. + batch.Reset() + for i := 0; i < nKeys; i++ { + batch.Delete([]byte(key2(i))) + } + h.write(batch) + h.waitMemCompaction() + + // Run manual compaction. + h.compactRange(key1(0), key1(nKeys-1)) + + // Checking the keys. 
+ h.assertNumKeys(nKeys) +} + +func TestDb_LeveldbIssue200(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.put("1", "b") + h.put("2", "c") + h.put("3", "d") + h.put("4", "e") + h.put("5", "f") + + iter := h.db.NewIterator(nil, h.ro) + + // Add an element that should not be reflected in the iterator. + h.put("25", "cd") + + iter.Seek([]byte("5")) + assertBytes(t, []byte("5"), iter.Key()) + iter.Prev() + assertBytes(t, []byte("4"), iter.Key()) + iter.Prev() + assertBytes(t, []byte("3"), iter.Key()) + iter.Next() + assertBytes(t, []byte("4"), iter.Key()) + iter.Next() + assertBytes(t, []byte("5"), iter.Key()) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go new file mode 100644 index 000000000..4da98a77e --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go @@ -0,0 +1,97 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// Reader is the interface that wraps basic Get and NewIterator methods. +// This interface implemented by both DB and Snapshot. +type Reader interface { + Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) + NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator +} + +type Sizes []uint64 + +// Sum returns sum of the sizes. +func (p Sizes) Sum() (n uint64) { + for _, s := range p { + n += s + } + return n +} + +// Logging. +func (db *DB) log(v ...interface{}) { db.s.log(v...) } +func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) } + +// Check and clean files. 
+func (db *DB) checkAndCleanFiles() error { + v := db.s.version_NB() + tablesMap := make(map[uint64]bool) + for _, tables := range v.tables { + for _, t := range tables { + tablesMap[t.file.Num()] = false + } + } + + files, err := db.s.getFiles(storage.TypeAll) + if err != nil { + return err + } + + var nTables int + var rem []storage.File + for _, f := range files { + keep := true + switch f.Type() { + case storage.TypeManifest: + keep = f.Num() >= db.s.manifestFile.Num() + case storage.TypeJournal: + if db.frozenJournalFile != nil { + keep = f.Num() >= db.frozenJournalFile.Num() + } else { + keep = f.Num() >= db.journalFile.Num() + } + case storage.TypeTable: + _, keep = tablesMap[f.Num()] + if keep { + tablesMap[f.Num()] = true + nTables++ + } + } + + if !keep { + rem = append(rem, f) + } + } + + if nTables != len(tablesMap) { + for num, present := range tablesMap { + if !present { + db.logf("db@janitor table missing @%d", num) + } + } + return ErrCorrupted{Type: MissingFiles, Err: errors.New("leveldb: table files missing")} + } + + db.logf("db@janitor F·%d G·%d", len(files), len(rem)) + for _, f := range rem { + db.logf("db@janitor removing %s-%d", f.Type(), f.Num()) + if err := f.Remove(); err != nil { + return err + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go new file mode 100644 index 000000000..85be3867d --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go @@ -0,0 +1,290 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "time" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +func (db *DB) writeJournal(b *Batch) error { + w, err := db.journal.Next() + if err != nil { + return err + } + if _, err := w.Write(b.encode()); err != nil { + return err + } + if err := db.journal.Flush(); err != nil { + return err + } + if b.sync { + return db.journalWriter.Sync() + } + return nil +} + +func (db *DB) jWriter() { + defer db.closeW.Done() + for { + select { + case b := <-db.journalC: + if b != nil { + db.journalAckC <- db.writeJournal(b) + } + case _, _ = <-db.closeC: + return + } + } +} + +func (db *DB) rotateMem(n int) (mem *memDB, err error) { + // Wait for pending memdb compaction. + err = db.compSendIdle(db.mcompCmdC) + if err != nil { + return + } + + // Create new memdb and journal. + mem, err = db.newMem(n) + if err != nil { + return + } + + // Schedule memdb compaction. + db.compTrigger(db.mcompTriggerC) + return +} + +func (db *DB) flush(n int) (mem *memDB, nn int, err error) { + delayed := false + flush := func() (retry bool) { + v := db.s.version() + defer v.release() + mem = db.getEffectiveMem() + defer func() { + if retry { + mem.decref() + mem = nil + } + }() + nn = mem.mdb.Free() + switch { + case v.tLen(0) >= kL0_SlowdownWritesTrigger && !delayed: + delayed = true + time.Sleep(time.Millisecond) + case nn >= n: + return false + case v.tLen(0) >= kL0_StopWritesTrigger: + delayed = true + err = db.compSendIdle(db.tcompCmdC) + if err != nil { + return false + } + default: + // Allow memdb to grow if it has no entry. 
+ if mem.mdb.Len() == 0 { + nn = n + } else { + mem.decref() + mem, err = db.rotateMem(n) + if err == nil { + nn = mem.mdb.Free() + } else { + nn = 0 + } + } + return false + } + return true + } + start := time.Now() + for flush() { + } + if delayed { + db.logf("db@write delayed T·%v", time.Since(start)) + } + return +} + +// Write apply the given batch to the DB. The batch will be applied +// sequentially. +// +// It is safe to modify the contents of the arguments after Write returns. +func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) { + err = db.ok() + if err != nil || b == nil || b.len() == 0 { + return + } + + b.init(wo.GetSync()) + + // The write happen synchronously. +retry: + select { + case db.writeC <- b: + if <-db.writeMergedC { + return <-db.writeAckC + } + goto retry + case db.writeLockC <- struct{}{}: + case _, _ = <-db.closeC: + return ErrClosed + } + + merged := 0 + defer func() { + <-db.writeLockC + for i := 0; i < merged; i++ { + db.writeAckC <- err + } + }() + + mem, memFree, err := db.flush(b.size()) + if err != nil { + return + } + defer mem.decref() + + // Calculate maximum size of the batch. + m := 1 << 20 + if x := b.size(); x <= 128<<10 { + m = x + (128 << 10) + } + m = minInt(m, memFree) + + // Merge with other batch. +drain: + for b.size() < m && !b.sync { + select { + case nb := <-db.writeC: + if b.size()+nb.size() <= m { + b.append(nb) + db.writeMergedC <- true + merged++ + } else { + db.writeMergedC <- false + break drain + } + default: + break drain + } + } + + // Set batch first seq number relative from last seq. + b.seq = db.seq + 1 + + // Write journal concurrently if it is large enough. + if b.size() >= (128 << 10) { + // Push the write batch to the journal writer + select { + case _, _ = <-db.closeC: + err = ErrClosed + return + case db.journalC <- b: + // Write into memdb + b.memReplay(mem.mdb) + } + // Wait for journal writer + select { + case _, _ = <-db.closeC: + err = ErrClosed + return + case err = <-db.journalAckC: + if err != nil { + // Revert memdb if error detected + b.revertMemReplay(mem.mdb) + return + } + } + } else { + err = db.writeJournal(b) + if err != nil { + return + } + b.memReplay(mem.mdb) + } + + // Set last seq number. + db.addSeq(uint64(b.len())) + + if b.size() >= memFree { + db.rotateMem(0) + } + return +} + +// Put sets the value for the given key. It overwrites any previous value +// for that key; a DB is not a multi-map. +// +// It is safe to modify the contents of the arguments after Put returns. +func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error { + b := new(Batch) + b.Put(key, value) + return db.Write(b, wo) +} + +// Delete deletes the value for the given key. It returns ErrNotFound if +// the DB does not contain the key. +// +// It is safe to modify the contents of the arguments after Delete returns. +func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error { + b := new(Batch) + b.Delete(key) + return db.Write(b, wo) +} + +func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { + iter := mem.NewIterator(nil) + defer iter.Release() + return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) && + (min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0)) +} + +// CompactRange compacts the underlying DB for the given key range. +// In particular, deleted and overwritten versions are discarded, +// and the data is rearranged to reduce the cost of operations +// needed to access the data. 
This operation should typically only +// be invoked by users who understand the underlying implementation. +// +// A nil Range.Start is treated as a key before all keys in the DB. +// And a nil Range.Limit is treated as a key after all keys in the DB. +// Therefore if both is nil then it will compact entire DB. +func (db *DB) CompactRange(r util.Range) error { + if err := db.ok(); err != nil { + return err + } + + select { + case db.writeLockC <- struct{}{}: + case _, _ = <-db.closeC: + return ErrClosed + } + + // Check for overlaps in memdb. + mem := db.getEffectiveMem() + defer mem.decref() + if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) { + // Memdb compaction. + if _, err := db.rotateMem(0); err != nil { + <-db.writeLockC + return err + } + <-db.writeLockC + if err := db.compSendIdle(db.mcompCmdC); err != nil { + return err + } + } else { + <-db.writeLockC + } + + // Table compaction. + return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go new file mode 100644 index 000000000..53f13bb24 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go @@ -0,0 +1,90 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package leveldb provides implementation of LevelDB key/value database. +// +// Create or open a database: +// +// db, err := leveldb.OpenFile("path/to/db", nil) +// ... +// defer db.Close() +// ... +// +// Read or modify the database content: +// +// // Remember that the contents of the returned slice should not be modified. +// data, err := db.Get([]byte("key"), nil) +// ... +// err = db.Put([]byte("key"), []byte("value"), nil) +// ... +// err = db.Delete([]byte("key"), nil) +// ... +// +// Iterate over database content: +// +// iter := db.NewIterator(nil, nil) +// for iter.Next() { +// // Remember that the contents of the returned slice should not be modified, and +// // only valid until the next call to Next. +// key := iter.Key() +// value := iter.Value() +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Iterate over subset of database content with a particular prefix: +// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil) +// for iter.Next() { +// // Use key/value. +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Seek-then-Iterate: +// +// iter := db.NewIterator(nil, nil) +// for ok := iter.Seek(key); ok; ok = iter.Next() { +// // Use key/value. +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Iterate over subset of database content: +// +// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) +// for iter.Next() { +// // Use key/value. +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Batch writes: +// +// batch := new(leveldb.Batch) +// batch.Put([]byte("foo"), []byte("value")) +// batch.Put([]byte("bar"), []byte("another value")) +// batch.Delete([]byte("baz")) +// err = db.Write(batch, nil) +// ... +// +// Use bloom filter: +// +// o := &opt.Options{ +// Filter: filter.NewBloomFilter(10), +// } +// db, err := leveldb.OpenFile("path/to/db", o) +// ... +// defer db.Close() +// ... 
+package leveldb diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/error.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/error.go new file mode 100644 index 000000000..4fa488e5b --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/error.go @@ -0,0 +1,38 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + ErrNotFound = util.ErrNotFound + ErrSnapshotReleased = errors.New("leveldb: snapshot released") + ErrIterReleased = errors.New("leveldb: iterator released") + ErrClosed = errors.New("leveldb: closed") +) + +type CorruptionType int + +const ( + CorruptedManifest CorruptionType = iota + MissingFiles +) + +// ErrCorrupted is the type that wraps errors that indicate corruption in +// the database. +type ErrCorrupted struct { + Type CorruptionType + Err error +} + +func (e ErrCorrupted) Error() string { + return e.Err.Error() +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go new file mode 100644 index 000000000..1694997d4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go @@ -0,0 +1,58 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +var _ = testutil.Defer(func() { + Describe("Leveldb external", func() { + o := &opt.Options{ + BlockCache: opt.NoCache, + BlockRestartInterval: 5, + BlockSize: 50, + Compression: opt.NoCompression, + CachedOpenFiles: -1, + Strict: opt.StrictAll, + WriteBuffer: 1000, + } + + Describe("write test", func() { + It("should do write correctly", func(done Done) { + db := newTestingDB(o, nil, nil) + t := testutil.DBTesting{ + DB: db, + Deleted: testutil.KeyValue_Generate(nil, 500, 1, 50, 5, 5).Clone(), + } + testutil.DoDBTesting(&t) + db.TestClose() + done <- true + }, 20.0) + }) + + Describe("read test", func() { + testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB { + // Building the DB. + db := newTestingDB(o, nil, nil) + kv.IterateShuffled(nil, func(i int, key, value []byte) { + err := db.TestPut(key, value) + Expect(err).NotTo(HaveOccurred()) + }) + testutil.Defer("teardown", func() { + db.TestClose() + }) + + return db + }) + }) + }) +}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go new file mode 100644 index 000000000..2d8748b6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go @@ -0,0 +1,31 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package leveldb + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" +) + +type iFilter struct { + filter.Filter +} + +func (f iFilter) Contains(filter, key []byte) bool { + return f.Filter.Contains(filter, iKey(key).ukey()) +} + +func (f iFilter) NewGenerator() filter.FilterGenerator { + return iFilterGenerator{f.Filter.NewGenerator()} +} + +type iFilterGenerator struct { + filter.FilterGenerator +} + +func (g iFilterGenerator) Add(key []byte) { + g.FilterGenerator.Add(iKey(key).ukey()) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go new file mode 100644 index 000000000..1bf222da5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go @@ -0,0 +1,116 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package filter + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +func bloomHash(key []byte) uint32 { + return util.Hash(key, 0xbc9f1d34) +} + +type bloomFilter int + +// The bloom filter serializes its parameters and is backward compatible +// with respect to them. Therefor, its parameters are not added to its +// name. +func (bloomFilter) Name() string { + return "leveldb.BuiltinBloomFilter" +} + +func (f bloomFilter) Contains(filter, key []byte) bool { + nBytes := len(filter) - 1 + if nBytes < 1 { + return false + } + nBits := uint32(nBytes * 8) + + // Use the encoded k so that we can read filters generated by + // bloom filters created using different parameters. + k := filter[nBytes] + if k > 30 { + // Reserved for potentially new encodings for short bloom filters. + // Consider it a match. + return true + } + + kh := bloomHash(key) + delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits + for j := uint8(0); j < k; j++ { + bitpos := kh % nBits + if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 { + return false + } + kh += delta + } + return true +} + +func (f bloomFilter) NewGenerator() FilterGenerator { + // Round down to reduce probing cost a little bit. + k := uint8(f * 69 / 100) // 0.69 =~ ln(2) + if k < 1 { + k = 1 + } else if k > 30 { + k = 30 + } + return &bloomFilterGenerator{ + n: int(f), + k: k, + } +} + +type bloomFilterGenerator struct { + n int + k uint8 + + keyHashes []uint32 +} + +func (g *bloomFilterGenerator) Add(key []byte) { + // Use double-hashing to generate a sequence of hash values. + // See analysis in [Kirsch,Mitzenmacher 2006]. + g.keyHashes = append(g.keyHashes, bloomHash(key)) +} + +func (g *bloomFilterGenerator) Generate(b Buffer) { + // Compute bloom filter size (in both bits and bytes) + nBits := uint32(len(g.keyHashes) * g.n) + // For small n, we can see a very high false positive rate. Fix it + // by enforcing a minimum bloom filter length. + if nBits < 64 { + nBits = 64 + } + nBytes := (nBits + 7) / 8 + nBits = nBytes * 8 + + dest := b.Alloc(int(nBytes) + 1) + dest[nBytes] = g.k + for _, kh := range g.keyHashes { + delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits + for j := uint8(0); j < g.k; j++ { + bitpos := kh % nBits + dest[bitpos/8] |= (1 << (bitpos % 8)) + kh += delta + } + } + + g.keyHashes = g.keyHashes[:0] +} + +// NewBloomFilter creates a new initialized bloom filter for given +// bitsPerKey. 
+// +// Since bitsPerKey is persisted individually for each bloom filter +// serialization, bloom filters are backwards compatible with respect to +// changing bitsPerKey. This means that no big performance penalty will +// be experienced when changing the parameter. See documentation for +// opt.Options.Filter for more information. +func NewBloomFilter(bitsPerKey int) Filter { + return bloomFilter(bitsPerKey) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go new file mode 100644 index 000000000..5bae258c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go @@ -0,0 +1,142 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package filter + +import ( + "encoding/binary" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" + "testing" +) + +type harness struct { + t *testing.T + + bloom Filter + generator FilterGenerator + filter []byte +} + +func newHarness(t *testing.T) *harness { + bloom := NewBloomFilter(10) + return &harness{ + t: t, + bloom: bloom, + generator: bloom.NewGenerator(), + } +} + +func (h *harness) add(key []byte) { + h.generator.Add(key) +} + +func (h *harness) addNum(key uint32) { + var b [4]byte + binary.LittleEndian.PutUint32(b[:], key) + h.add(b[:]) +} + +func (h *harness) build() { + b := &util.Buffer{} + h.generator.Generate(b) + h.filter = b.Bytes() +} + +func (h *harness) reset() { + h.filter = nil +} + +func (h *harness) filterLen() int { + return len(h.filter) +} + +func (h *harness) assert(key []byte, want, silent bool) bool { + got := h.bloom.Contains(h.filter, key) + if !silent && got != want { + h.t.Errorf("assert on '%v' failed got '%v', want '%v'", key, got, want) + } + return got +} + +func (h *harness) assertNum(key uint32, want, silent bool) bool { + var b [4]byte + binary.LittleEndian.PutUint32(b[:], key) + return h.assert(b[:], want, silent) +} + +func TestBloomFilter_Empty(t *testing.T) { + h := newHarness(t) + h.build() + h.assert([]byte("hello"), false, false) + h.assert([]byte("world"), false, false) +} + +func TestBloomFilter_Small(t *testing.T) { + h := newHarness(t) + h.add([]byte("hello")) + h.add([]byte("world")) + h.build() + h.assert([]byte("hello"), true, false) + h.assert([]byte("world"), true, false) + h.assert([]byte("x"), false, false) + h.assert([]byte("foo"), false, false) +} + +func nextN(n int) int { + switch { + case n < 10: + n += 1 + case n < 100: + n += 10 + case n < 1000: + n += 100 + default: + n += 1000 + } + return n +} + +func TestBloomFilter_VaryingLengths(t *testing.T) { + h := newHarness(t) + var mediocre, good int + for n := 1; n < 10000; n = nextN(n) { + h.reset() + for i := 0; i < n; i++ { + h.addNum(uint32(i)) + } + h.build() + + got := h.filterLen() + want := (n * 10 / 8) + 40 + if got > want { + t.Errorf("filter len test failed, '%d' > '%d'", got, want) + } + + for i := 0; i < n; i++ { + h.assertNum(uint32(i), true, false) + } + + var rate float32 + for i := 0; i < 10000; i++ { + if h.assertNum(uint32(i+1000000000), true, true) { + rate++ + } + } + rate /= 10000 + if rate > 0.02 { + t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n) + } + if rate > 0.0125 { + mediocre++ + } else { + good++ + } + } + t.Logf("false positive rate: %d good, %d 
mediocre", good, mediocre) + if mediocre > good/5 { + t.Error("mediocre false positive rate is more than expected") + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go new file mode 100644 index 000000000..7a925c5a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go @@ -0,0 +1,60 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package filter provides interface and implementation of probabilistic +// data structure. +// +// The filter is resposible for creating small filter from a set of keys. +// These filter will then used to test whether a key is a member of the set. +// In many cases, a filter can cut down the number of disk seeks from a +// handful to a single disk seek per DB.Get call. +package filter + +// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods. +type Buffer interface { + // Alloc allocs n bytes of slice from the buffer. This also advancing + // write offset. + Alloc(n int) []byte + + // Write appends the contents of p to the buffer. + Write(p []byte) (n int, err error) + + // WriteByte appends the byte c to the buffer. + WriteByte(c byte) error +} + +// Filter is the filter. +type Filter interface { + // Name returns the name of this policy. + // + // Note that if the filter encoding changes in an incompatible way, + // the name returned by this method must be changed. Otherwise, old + // incompatible filters may be passed to methods of this type. + Name() string + + // NewGenerator creates a new filter generator. + NewGenerator() FilterGenerator + + // Contains returns true if the filter contains the given key. + // + // The filter are filters generated by the filter generator. + Contains(filter, key []byte) bool +} + +// FilterGenerator is the filter generator. +type FilterGenerator interface { + // Add adds a key to the filter generator. + // + // The key may become invalid after call to this method end, therefor + // key must be copied if implementation require keeping key for later + // use. The key should not modified directly, doing so may cause + // undefined results. + Add(key []byte) + + // Generate generates filters based on keys passed so far. After call + // to Generate the filter generator maybe resetted, depends on implementation. + Generate(b Buffer) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go new file mode 100644 index 000000000..e76657e5e --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go @@ -0,0 +1,58 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
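The harness in bloom_test.go above already walks the full bloom filter round trip (generator, util.Buffer, Contains). A condensed sketch of the same flow, using only the NewBloomFilter, FilterGenerator and util.Buffer APIs that appear in this patch:

package main

import (
	"fmt"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	// Build a filter over a handful of keys at ~10 bits per key, as the tests do.
	f := filter.NewBloomFilter(10)
	gen := f.NewGenerator()
	for _, k := range []string{"alpha", "beta", "gamma"} {
		gen.Add([]byte(k))
	}

	// util.Buffer satisfies the filter.Buffer interface (Alloc/Write/WriteByte).
	buf := &util.Buffer{}
	gen.Generate(buf)
	data := buf.Bytes()

	// Membership test: no false negatives; false positives are possible but kept
	// below roughly 2% at this bits-per-key setting.
	fmt.Println(f.Contains(data, []byte("alpha"))) // true
	fmt.Println(f.Contains(data, []byte("delta"))) // almost certainly false
}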
+ +// +build go1.3 + +package leveldb + +import ( + "sync/atomic" + "testing" +) + +func BenchmarkDBReadConcurrent(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gc() + defer p.close() + + b.ResetTimer() + b.SetBytes(116) + + b.RunParallel(func(pb *testing.PB) { + iter := p.newIter() + defer iter.Release() + for pb.Next() && iter.Next() { + } + }) +} + +func BenchmarkDBReadConcurrent2(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gc() + defer p.close() + + b.ResetTimer() + b.SetBytes(116) + + var dir uint32 + b.RunParallel(func(pb *testing.PB) { + iter := p.newIter() + defer iter.Release() + if atomic.AddUint32(&dir, 1)%2 == 0 { + for pb.Next() && iter.Next() { + } + } else { + if pb.Next() && iter.Last() { + for pb.Next() && iter.Prev() { + } + } + } + }) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go new file mode 100644 index 000000000..8391e12b4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go @@ -0,0 +1,158 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package iterator + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// BasicArray is the interface that wraps basic Len and Search method. +type BasicArray interface { + // Len returns length of the array. + Len() int + + // Search finds smallest index that point to a key that is greater + // than or equal to the given key. + Search(key []byte) int +} + +// Array is the interface that wraps BasicArray and basic Index method. +type Array interface { + BasicArray + + // Index returns key/value pair with index of i. + Index(i int) (key, value []byte) +} + +// Array is the interface that wraps BasicArray and basic Get method. +type ArrayIndexer interface { + BasicArray + + // Get returns a new data iterator with index of i. 
+ Get(i int) Iterator +} + +type basicArrayIterator struct { + util.BasicReleaser + array BasicArray + pos int +} + +func (i *basicArrayIterator) Valid() bool { + return i.pos >= 0 && i.pos < i.array.Len() +} + +func (i *basicArrayIterator) First() bool { + if i.array.Len() == 0 { + i.pos = -1 + return false + } + i.pos = 0 + return true +} + +func (i *basicArrayIterator) Last() bool { + n := i.array.Len() + if n == 0 { + i.pos = 0 + return false + } + i.pos = n - 1 + return true +} + +func (i *basicArrayIterator) Seek(key []byte) bool { + n := i.array.Len() + if n == 0 { + i.pos = 0 + return false + } + i.pos = i.array.Search(key) + if i.pos >= n { + return false + } + return true +} + +func (i *basicArrayIterator) Next() bool { + i.pos++ + if n := i.array.Len(); i.pos >= n { + i.pos = n + return false + } + return true +} + +func (i *basicArrayIterator) Prev() bool { + i.pos-- + if i.pos < 0 { + i.pos = -1 + return false + } + return true +} + +func (i *basicArrayIterator) Error() error { return nil } + +type arrayIterator struct { + basicArrayIterator + array Array + pos int + key, value []byte +} + +func (i *arrayIterator) updateKV() { + if i.pos == i.basicArrayIterator.pos { + return + } + i.pos = i.basicArrayIterator.pos + if i.Valid() { + i.key, i.value = i.array.Index(i.pos) + } else { + i.key = nil + i.value = nil + } +} + +func (i *arrayIterator) Key() []byte { + i.updateKV() + return i.key +} + +func (i *arrayIterator) Value() []byte { + i.updateKV() + return i.value +} + +type arrayIteratorIndexer struct { + basicArrayIterator + array ArrayIndexer +} + +func (i *arrayIteratorIndexer) Get() Iterator { + if i.Valid() { + return i.array.Get(i.basicArrayIterator.pos) + } + return nil +} + +// NewArrayIterator returns an iterator from the given array. +func NewArrayIterator(array Array) Iterator { + return &arrayIterator{ + basicArrayIterator: basicArrayIterator{array: array, pos: -1}, + array: array, + pos: -1, + } +} + +// NewArrayIndexer returns an index iterator from the given array. +func NewArrayIndexer(array ArrayIndexer) IteratorIndexer { + return &arrayIteratorIndexer{ + basicArrayIterator: basicArrayIterator{array: array, pos: -1}, + array: array, + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go new file mode 100644 index 000000000..00d96668b --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go @@ -0,0 +1,30 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package iterator_test + +import ( + . "github.com/onsi/ginkgo" + + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +var _ = testutil.Defer(func() { + Describe("Array iterator", func() { + It("Should iterates and seeks correctly", func() { + // Build key/value. + kv := testutil.KeyValue_Generate(nil, 70, 1, 5, 3, 3) + + // Test the iterator. 
+ t := testutil.IteratorTesting{ + KeyValue: kv.Clone(), + Iter: NewArrayIterator(kv), + } + testutil.DoIteratorTesting(&t) + }) + }) +}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go new file mode 100644 index 000000000..3a61c3d9f --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go @@ -0,0 +1,221 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package iterator + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// IteratorIndexer is the interface that wraps CommonIterator and basic Get +// method. IteratorIndexer provides index for indexed iterator. +type IteratorIndexer interface { + CommonIterator + + // Get returns a new data iterator for the current position, or nil if + // done. + Get() Iterator +} + +type indexedIterator struct { + util.BasicReleaser + index IteratorIndexer + strict bool + strictGet bool + + data Iterator + err error + errf func(err error) +} + +func (i *indexedIterator) setData() { + if i.data != nil { + i.data.Release() + } + i.data = i.index.Get() + if i.strictGet { + if err := i.data.Error(); err != nil { + i.err = err + } + } +} + +func (i *indexedIterator) clearData() { + if i.data != nil { + i.data.Release() + } + i.data = nil +} + +func (i *indexedIterator) dataErr() bool { + if i.errf != nil { + if err := i.data.Error(); err != nil { + i.errf(err) + } + } + if i.strict { + if err := i.data.Error(); err != nil { + i.err = err + return true + } + } + return false +} + +func (i *indexedIterator) Valid() bool { + return i.data != nil && i.data.Valid() +} + +func (i *indexedIterator) First() bool { + if i.err != nil { + return false + } + + if !i.index.First() { + i.clearData() + return false + } + i.setData() + return i.Next() +} + +func (i *indexedIterator) Last() bool { + if i.err != nil { + return false + } + + if !i.index.Last() { + i.clearData() + return false + } + i.setData() + if !i.data.Last() { + if i.dataErr() { + return false + } + i.clearData() + return i.Prev() + } + return true +} + +func (i *indexedIterator) Seek(key []byte) bool { + if i.err != nil { + return false + } + + if !i.index.Seek(key) { + i.clearData() + return false + } + i.setData() + if !i.data.Seek(key) { + if i.dataErr() { + return false + } + i.clearData() + return i.Next() + } + return true +} + +func (i *indexedIterator) Next() bool { + if i.err != nil { + return false + } + + switch { + case i.data != nil && !i.data.Next(): + if i.dataErr() { + return false + } + i.clearData() + fallthrough + case i.data == nil: + if !i.index.Next() { + return false + } + i.setData() + return i.Next() + } + return true +} + +func (i *indexedIterator) Prev() bool { + if i.err != nil { + return false + } + + switch { + case i.data != nil && !i.data.Prev(): + if i.dataErr() { + return false + } + i.clearData() + fallthrough + case i.data == nil: + if !i.index.Prev() { + return false + } + i.setData() + if !i.data.Last() { + if i.dataErr() { + return false + } + i.clearData() + return i.Prev() + } + } + return true +} + +func (i *indexedIterator) Key() []byte { + if i.data == nil { + return nil + } + return i.data.Key() +} + +func (i *indexedIterator) Value() []byte { + if i.data == nil { + return nil + } + return 
i.data.Value() +} + +func (i *indexedIterator) Release() { + i.clearData() + i.index.Release() + i.BasicReleaser.Release() +} + +func (i *indexedIterator) Error() error { + if i.err != nil { + return i.err + } + if err := i.index.Error(); err != nil { + return err + } + return nil +} + +func (i *indexedIterator) SetErrorCallback(f func(err error)) { + i.errf = f +} + +// NewIndexedIterator returns an indexed iterator. An index is iterator +// that returns another iterator, a data iterator. A data iterator is the +// iterator that contains actual key/value pairs. +// +// If strict is true then error yield by data iterator will halt the indexed +// iterator, on contrary if strict is false then the indexed iterator will +// ignore those error and move on to the next index. If strictGet is true and +// index.Get() yield an 'error iterator' then the indexed iterator will be halted. +// An 'error iterator' is iterator which its Error() method always return non-nil +// even before any 'seeks method' is called. +func NewIndexedIterator(index IteratorIndexer, strict, strictGet bool) Iterator { + return &indexedIterator{index: index, strict: strict, strictGet: strictGet} +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go new file mode 100644 index 000000000..b22efedbb --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go @@ -0,0 +1,83 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package iterator_test + +import ( + "sort" + + . "github.com/onsi/ginkgo" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +type keyValue struct { + key []byte + testutil.KeyValue +} + +type keyValueIndex []keyValue + +func (x keyValueIndex) Search(key []byte) int { + return sort.Search(x.Len(), func(i int) bool { + return comparer.DefaultComparer.Compare(x[i].key, key) >= 0 + }) +} + +func (x keyValueIndex) Len() int { return len(x) } +func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil } +func (x keyValueIndex) Get(i int) Iterator { return NewArrayIterator(x[i]) } + +var _ = testutil.Defer(func() { + Describe("Indexed iterator", func() { + Test := func(n ...int) func() { + if len(n) == 0 { + rnd := testutil.NewRand() + n = make([]int, rnd.Intn(17)+3) + for i := range n { + n[i] = rnd.Intn(19) + 1 + } + } + + return func() { + It("Should iterates and seeks correctly", func(done Done) { + // Build key/value. + index := make(keyValueIndex, len(n)) + sum := 0 + for _, x := range n { + sum += x + } + kv := testutil.KeyValue_Generate(nil, sum, 1, 10, 4, 4) + for i, j := 0, 0; i < len(n); i++ { + for x := n[i]; x > 0; x-- { + key, value := kv.Index(j) + index[i].key = key + index[i].Put(key, value) + j++ + } + } + + // Test the iterator. 
+ t := testutil.IteratorTesting{ + KeyValue: kv.Clone(), + Iter: NewIndexedIterator(NewArrayIndexer(index), true, true), + } + testutil.DoIteratorTesting(&t) + done <- true + }, 1.5) + } + } + + Describe("with 100 keys", Test(100)) + Describe("with 50-50 keys", Test(50, 50)) + Describe("with 50-1 keys", Test(50, 1)) + Describe("with 50-1-50 keys", Test(50, 1, 50)) + Describe("with 1-50 keys", Test(1, 50)) + Describe("with random N-keys", Test()) + }) +}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go new file mode 100644 index 000000000..cd1df6de2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go @@ -0,0 +1,142 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package iterator provides interface and implementation to traverse over +// contents of a database. +package iterator + +import ( + "errors" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// IteratorSeeker is the interface that wraps the 'seeks method'. +type IteratorSeeker interface { + // First moves the iterator to the first key/value pair. If the iterator + // only contains one key/value pair then First and Last whould moves + // to the same key/value pair. + // It returns whether such pair exist. + First() bool + + // Last moves the iterator to the last key/value pair. If the iterator + // only contains one key/value pair then First and Last whould moves + // to the same key/value pair. + // It returns whether such pair exist. + Last() bool + + // Seek moves the iterator to the first key/value pair whose key is greater + // than or equal to the given key. + // It returns whether such pair exist. + // + // It is safe to modify the contents of the argument after Seek returns. + Seek(key []byte) bool + + // Next moves the iterator to the next key/value pair. + // It returns whether the iterator is exhausted. + Next() bool + + // Prev moves the iterator to the previous key/value pair. + // It returns whether the iterator is exhausted. + Prev() bool +} + +// CommonIterator is the interface that wraps common interator methods. +type CommonIterator interface { + IteratorSeeker + + // util.Releaser is the interface that wraps basic Release method. + // When called Release will releases any resources associated with the + // iterator. + util.Releaser + + // util.ReleaseSetter is the interface that wraps the basic SetReleaser + // method. + util.ReleaseSetter + + // TODO: Remove this when ready. + Valid() bool + + // Error returns any accumulated error. Exhausting all the key/value pairs + // is not considered to be an error. + Error() error +} + +// Iterator iterates over a DB's key/value pairs in key order. +// +// When encouter an error any 'seeks method' will return false and will +// yield no key/value pairs. The error can be queried by calling the Error +// method. Calling Release is still necessary. +// +// An iterator must be released after use, but it is not necessary to read +// an iterator until exhaustion. +// Also, an iterator is not necessarily goroutine-safe, but it is safe to use +// multiple iterators concurrently, with each in a dedicated goroutine. +type Iterator interface { + CommonIterator + + // Key returns the key of the current key/value pair, or nil if done. 
+ // The caller should not modify the contents of the returned slice, and + // its contents may change on the next call to any 'seeks method'. + Key() []byte + + // Value returns the key of the current key/value pair, or nil if done. + // The caller should not modify the contents of the returned slice, and + // its contents may change on the next call to any 'seeks method'. + Value() []byte +} + +// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback +// method. +// +// ErrorCallbackSetter implemented by indexed and merged iterator. +type ErrorCallbackSetter interface { + // SetErrorCallback allows set an error callback of the coresponding + // iterator. Use nil to clear the callback. + SetErrorCallback(f func(err error)) +} + +type emptyIterator struct { + releaser util.Releaser + released bool + err error +} + +func (i *emptyIterator) rErr() { + if i.err == nil && i.released { + i.err = errors.New("leveldb/iterator: iterator released") + } +} + +func (i *emptyIterator) Release() { + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + i.released = true +} + +func (i *emptyIterator) SetReleaser(releaser util.Releaser) { + if !i.released { + i.releaser = releaser + } +} + +func (*emptyIterator) Valid() bool { return false } +func (i *emptyIterator) First() bool { i.rErr(); return false } +func (i *emptyIterator) Last() bool { i.rErr(); return false } +func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false } +func (i *emptyIterator) Next() bool { i.rErr(); return false } +func (i *emptyIterator) Prev() bool { i.rErr(); return false } +func (*emptyIterator) Key() []byte { return nil } +func (*emptyIterator) Value() []byte { return nil } +func (i *emptyIterator) Error() error { return i.err } + +// NewEmptyIterator creates an empty iterator. The err parameter can be +// nil, but if not nil the given err will be returned by Error method. +func NewEmptyIterator(err error) Iterator { + return &emptyIterator{err: err} +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go new file mode 100644 index 000000000..ef8cdb14f --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go @@ -0,0 +1,17 @@ +package iterator_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +func TestIterator(t *testing.T) { + testutil.RunDefer() + + RegisterFailHandler(Fail) + RunSpecs(t, "Iterator Suite") +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go new file mode 100644 index 000000000..508f6a7aa --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go @@ -0,0 +1,307 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
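The Iterator contract documented in iter.go above (seek methods return false on both exhaustion and error, Release is mandatory, returned slices may be reused) implies a standard scan loop. A minimal sketch of that loop, fed with NewEmptyIterator only so it stays self-contained:

package main

import (
	"fmt"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
)

// dump walks any iterator.Iterator front to back, releases it, and reports
// any accumulated error; exhaustion alone is not an error.
func dump(it iterator.Iterator) error {
	defer it.Release()
	for ok := it.First(); ok; ok = it.Next() {
		// Key/Value slices may be reused on the next seek, so copy them if retained.
		fmt.Printf("%s = %s\n", it.Key(), it.Value())
	}
	return it.Error()
}

func main() {
	// The empty iterator yields nothing and returns the error it was built with (nil here).
	if err := dump(iterator.NewEmptyIterator(nil)); err != nil {
		fmt.Println("iterator error:", err)
	}
}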
+ +package iterator + +import ( + "errors" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + ErrIterReleased = errors.New("leveldb/iterator: iterator released") +) + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +type mergedIterator struct { + cmp comparer.Comparer + iters []Iterator + strict bool + + keys [][]byte + index int + dir dir + err error + errf func(err error) + releaser util.Releaser +} + +func assertKey(key []byte) []byte { + if key == nil { + panic("leveldb/iterator: nil key") + } + return key +} + +func (i *mergedIterator) iterErr(iter Iterator) bool { + if i.errf != nil { + if err := iter.Error(); err != nil { + i.errf(err) + } + } + if i.strict { + if err := iter.Error(); err != nil { + i.err = err + return true + } + } + return false +} + +func (i *mergedIterator) Valid() bool { + return i.err == nil && i.dir > dirEOI +} + +func (i *mergedIterator) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.First(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirSOI + return i.next() +} + +func (i *mergedIterator) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.Last(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirEOI + return i.prev() +} + +func (i *mergedIterator) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.Seek(key): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirSOI + return i.next() +} + +func (i *mergedIterator) next() bool { + var key []byte + if i.dir == dirForward { + key = i.keys[i.index] + } + for x, tkey := range i.keys { + if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) { + key = tkey + i.index = x + } + } + if key == nil { + i.dir = dirEOI + return false + } + i.dir = dirForward + return true +} + +func (i *mergedIterator) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirSOI: + return i.First() + case dirBackward: + key := append([]byte{}, i.keys[i.index]...) 
+ if !i.Seek(key) { + return false + } + return i.Next() + } + + x := i.index + iter := i.iters[x] + switch { + case iter.Next(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + return i.next() +} + +func (i *mergedIterator) prev() bool { + var key []byte + if i.dir == dirBackward { + key = i.keys[i.index] + } + for x, tkey := range i.keys { + if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) { + key = tkey + i.index = x + } + } + if key == nil { + i.dir = dirSOI + return false + } + i.dir = dirBackward + return true +} + +func (i *mergedIterator) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirEOI: + return i.Last() + case dirForward: + key := append([]byte{}, i.keys[i.index]...) + for x, iter := range i.iters { + if x == i.index { + continue + } + seek := iter.Seek(key) + switch { + case seek && iter.Prev(), !seek && iter.Last(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + } + + x := i.index + iter := i.iters[x] + switch { + case iter.Prev(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + return i.prev() +} + +func (i *mergedIterator) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.keys[i.index] +} + +func (i *mergedIterator) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.iters[i.index].Value() +} + +func (i *mergedIterator) Release() { + if i.dir != dirReleased { + i.dir = dirReleased + for _, iter := range i.iters { + iter.Release() + } + i.iters = nil + i.keys = nil + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + } +} + +func (i *mergedIterator) SetReleaser(releaser util.Releaser) { + if i.dir != dirReleased { + i.releaser = releaser + } +} + +func (i *mergedIterator) Error() error { + return i.err +} + +func (i *mergedIterator) SetErrorCallback(f func(err error)) { + i.errf = f +} + +// NewMergedIterator returns an iterator that merges its input. Walking the +// resultant iterator will return all key/value pairs of all input iterators +// in strictly increasing key order, as defined by cmp. +// The input's key ranges may overlap, but there are assumed to be no duplicate +// keys: if iters[i] contains a key k then iters[j] will not contain that key k. +// None of the iters may be nil. +// +// If strict is true then error yield by any iterators will halt the merged +// iterator, on contrary if strict is false then the merged iterator will +// ignore those error and move on to the next iterator. +func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator { + return &mergedIterator{ + iters: iters, + cmp: cmp, + strict: strict, + keys: make([][]byte, len(iters)), + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go new file mode 100644 index 000000000..597b34132 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go @@ -0,0 +1,60 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
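NewMergedIterator above expects sorted inputs whose key ranges may overlap but which share no duplicate keys. A small sketch that merges two hand-rolled Array-backed iterators; kvArray is an illustrative helper in the same spirit as keyValueIndex in the indexed-iterator tests earlier in this patch:

package main

import (
	"fmt"
	"sort"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
)

// kvArray is a sorted slice of key/value pairs implementing iterator.Array
// (Len, Search, Index).
type kvArray [][2]string

func (a kvArray) Len() int { return len(a) }

func (a kvArray) Search(key []byte) int {
	return sort.Search(len(a), func(i int) bool {
		return comparer.DefaultComparer.Compare([]byte(a[i][0]), key) >= 0
	})
}

func (a kvArray) Index(i int) (key, value []byte) {
	return []byte(a[i][0]), []byte(a[i][1])
}

func main() {
	// Two individually sorted inputs with disjoint keys.
	a := kvArray{{"a", "1"}, {"c", "3"}}
	b := kvArray{{"b", "2"}, {"d", "4"}}

	// The merged iterator yields all pairs in strictly increasing key order.
	it := iterator.NewMergedIterator(
		[]iterator.Iterator{iterator.NewArrayIterator(a), iterator.NewArrayIterator(b)},
		comparer.DefaultComparer,
		true, // strict: stop on the first error from any underlying iterator
	)
	defer it.Release()

	for ok := it.First(); ok; ok = it.Next() {
		fmt.Printf("%s=%s ", it.Key(), it.Value()) // a=1 b=2 c=3 d=4
	}
	fmt.Println()
}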
+ +package iterator_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +var _ = testutil.Defer(func() { + Describe("Merged iterator", func() { + Test := func(filled int, empty int) func() { + return func() { + It("Should iterates and seeks correctly", func(done Done) { + rnd := testutil.NewRand() + + // Build key/value. + filledKV := make([]testutil.KeyValue, filled) + kv := testutil.KeyValue_Generate(nil, 100, 1, 10, 4, 4) + kv.Iterate(func(i int, key, value []byte) { + filledKV[rnd.Intn(filled)].Put(key, value) + }) + + // Create itearators. + iters := make([]Iterator, filled+empty) + for i := range iters { + if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) { + filled-- + Expect(filledKV[filled].Len()).ShouldNot(BeZero()) + iters[i] = NewArrayIterator(filledKV[filled]) + } else { + empty-- + iters[i] = NewEmptyIterator(nil) + } + } + + // Test the iterator. + t := testutil.IteratorTesting{ + KeyValue: kv.Clone(), + Iter: NewMergedIterator(iters, comparer.DefaultComparer, true), + } + testutil.DoIteratorTesting(&t) + done <- true + }, 1.5) + } + } + + Describe("with three, all filled iterators", Test(3, 0)) + Describe("with one filled, one empty iterators", Test(1, 1)) + Describe("with one filled, two empty iterators", Test(1, 2)) + }) +}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go new file mode 100644 index 000000000..6fcf79fb9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go @@ -0,0 +1,520 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0 +// License, authors and contributors informations can be found at bellow URLs respectively: +// https://code.google.com/p/leveldb-go/source/browse/LICENSE +// https://code.google.com/p/leveldb-go/source/browse/AUTHORS +// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS + +// Package journal reads and writes sequences of journals. Each journal is a stream +// of bytes that completes before the next journal starts. +// +// When reading, call Next to obtain an io.Reader for the next journal. Next will +// return io.EOF when there are no more journals. It is valid to call Next +// without reading the current journal to exhaustion. +// +// When writing, call Next to obtain an io.Writer for the next journal. Calling +// Next finishes the current journal. Call Close to finish the final journal. +// +// Optionally, call Flush to finish the current journal and flush the underlying +// writer without starting a new journal. To start a new journal after flushing, +// call Next. +// +// Neither Readers or Writers are safe to use concurrently. 
+// +// Example code: +// func read(r io.Reader) ([]string, error) { +// var ss []string +// journals := journal.NewReader(r, nil, true, true) +// for { +// j, err := journals.Next() +// if err == io.EOF { +// break +// } +// if err != nil { +// return nil, err +// } +// s, err := ioutil.ReadAll(j) +// if err != nil { +// return nil, err +// } +// ss = append(ss, string(s)) +// } +// return ss, nil +// } +// +// func write(w io.Writer, ss []string) error { +// journals := journal.NewWriter(w) +// for _, s := range ss { +// j, err := journals.Next() +// if err != nil { +// return err +// } +// if _, err := j.Write([]byte(s)), err != nil { +// return err +// } +// } +// return journals.Close() +// } +// +// The wire format is that the stream is divided into 32KiB blocks, and each +// block contains a number of tightly packed chunks. Chunks cannot cross block +// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a +// block must be zero. +// +// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4 +// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type) +// followed by a payload. The checksum is over the chunk type and the payload. +// +// There are four chunk types: whether the chunk is the full journal, or the +// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal +// has one first chunk, zero or more middle chunks, and one last chunk. +// +// The wire format allows for limited recovery in the face of data corruption: +// on a format error (such as a checksum mismatch), the reader moves to the +// next block and looks for the next full or first chunk. +package journal + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// These constants are part of the wire format and should not be changed. +const ( + fullChunkType = 1 + firstChunkType = 2 + middleChunkType = 3 + lastChunkType = 4 +) + +const ( + blockSize = 32 * 1024 + headerSize = 7 +) + +type flusher interface { + Flush() error +} + +// ErrCorrupted is the error type that generated by corrupted block or chunk. +type ErrCorrupted struct { + Size int + Reason string +} + +func (e ErrCorrupted) Error() string { + return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size) +} + +// Dropper is the interface that wrap simple Drop method. The Drop +// method will be called when the journal reader dropping a block or chunk. +type Dropper interface { + Drop(err error) +} + +// Reader reads journals from an underlying io.Reader. +type Reader struct { + // r is the underlying reader. + r io.Reader + // the dropper. + dropper Dropper + // strict flag. + strict bool + // checksum flag. + checksum bool + // seq is the sequence number of the current journal. + seq int + // buf[i:j] is the unread portion of the current chunk's payload. + // The low bound, i, excludes the chunk header. + i, j int + // n is the number of bytes of buf that are valid. Once reading has started, + // only the final block can have n < blockSize. + n int + // last is whether the current chunk is the last chunk of the journal. + last bool + // err is any accumulated error. + err error + // buf is the buffer. + buf [blockSize]byte +} + +// NewReader returns a new reader. The dropper may be nil, and if +// strict is true then corrupted or invalid chunk will halt the journal +// reader entirely. 
+func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader { + return &Reader{ + r: r, + dropper: dropper, + strict: strict, + checksum: checksum, + last: true, + } +} + +var errSkip = errors.New("leveldb/journal: skipped") + +func (r *Reader) corrupt(n int, reason string, skip bool) error { + if r.dropper != nil { + r.dropper.Drop(ErrCorrupted{n, reason}) + } + if r.strict && !skip { + r.err = ErrCorrupted{n, reason} + return r.err + } + return errSkip +} + +// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the +// next block into the buffer if necessary. +func (r *Reader) nextChunk(first bool) error { + for { + if r.j+headerSize <= r.n { + checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) + length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) + chunkType := r.buf[r.j+6] + + if checksum == 0 && length == 0 && chunkType == 0 { + // Drop entire block. + m := r.n - r.j + r.i = r.n + r.j = r.n + return r.corrupt(m, "zero header", false) + } else { + m := r.n - r.j + r.i = r.j + headerSize + r.j = r.j + headerSize + int(length) + if r.j > r.n { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(m, "chunk length overflows block", false) + } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(m, "checksum mismatch", false) + } + } + if first && chunkType != fullChunkType && chunkType != firstChunkType { + m := r.j - r.i + r.i = r.j + // Report the error, but skip it. + return r.corrupt(m+headerSize, "orphan chunk", true) + } + r.last = chunkType == fullChunkType || chunkType == lastChunkType + return nil + } + + // The last block. + if r.n < blockSize && r.n > 0 { + if !first { + return r.corrupt(0, "missing chunk part", false) + } + r.err = io.EOF + return r.err + } + + // Read block. + n, err := io.ReadFull(r.r, r.buf[:]) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return err + } + if n == 0 { + if !first { + return r.corrupt(0, "missing chunk part", false) + } + r.err = io.EOF + return r.err + } + r.i, r.j, r.n = 0, 0, n + } +} + +// Next returns a reader for the next journal. It returns io.EOF if there are no +// more journals. The reader returned becomes stale after the next Next call, +// and should no longer be used. If strict is false, the reader will returns +// io.ErrUnexpectedEOF error when found corrupted journal. +func (r *Reader) Next() (io.Reader, error) { + r.seq++ + if r.err != nil { + return nil, r.err + } + r.i = r.j + for { + if err := r.nextChunk(true); err == nil { + break + } else if err != errSkip { + return nil, err + } + } + return &singleReader{r, r.seq, nil}, nil +} + +// Reset resets the journal reader, allows reuse of the journal reader. Reset returns +// last accumulated error. 
+func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { + r.seq++ + err := r.err + r.r = reader + r.dropper = dropper + r.strict = strict + r.checksum = checksum + r.i = 0 + r.j = 0 + r.n = 0 + r.last = true + r.err = nil + return err +} + +type singleReader struct { + r *Reader + seq int + err error +} + +func (x *singleReader) Read(p []byte) (int, error) { + r := x.r + if r.seq != x.seq { + return 0, errors.New("leveldb/journal: stale reader") + } + if x.err != nil { + return 0, x.err + } + if r.err != nil { + return 0, r.err + } + for r.i == r.j { + if r.last { + return 0, io.EOF + } + x.err = r.nextChunk(false) + if x.err != nil { + if x.err == errSkip { + x.err = io.ErrUnexpectedEOF + } + return 0, x.err + } + } + n := copy(p, r.buf[r.i:r.j]) + r.i += n + return n, nil +} + +func (x *singleReader) ReadByte() (byte, error) { + r := x.r + if r.seq != x.seq { + return 0, errors.New("leveldb/journal: stale reader") + } + if x.err != nil { + return 0, x.err + } + if r.err != nil { + return 0, r.err + } + for r.i == r.j { + if r.last { + return 0, io.EOF + } + x.err = r.nextChunk(false) + if x.err != nil { + if x.err == errSkip { + x.err = io.ErrUnexpectedEOF + } + return 0, x.err + } + } + c := r.buf[r.i] + r.i++ + return c, nil +} + +// Writer writes journals to an underlying io.Writer. +type Writer struct { + // w is the underlying writer. + w io.Writer + // seq is the sequence number of the current journal. + seq int + // f is w as a flusher. + f flusher + // buf[i:j] is the bytes that will become the current chunk. + // The low bound, i, includes the chunk header. + i, j int + // buf[:written] has already been written to w. + // written is zero unless Flush has been called. + written int + // first is whether the current chunk is the first chunk of the journal. + first bool + // pending is whether a chunk is buffered but not yet written. + pending bool + // err is any accumulated error. + err error + // buf is the buffer. + buf [blockSize]byte +} + +// NewWriter returns a new Writer. +func NewWriter(w io.Writer) *Writer { + f, _ := w.(flusher) + return &Writer{ + w: w, + f: f, + } +} + +// fillHeader fills in the header for the pending chunk. +func (w *Writer) fillHeader(last bool) { + if w.i+headerSize > w.j || w.j > blockSize { + panic("leveldb/journal: bad writer state") + } + if last { + if w.first { + w.buf[w.i+6] = fullChunkType + } else { + w.buf[w.i+6] = lastChunkType + } + } else { + if w.first { + w.buf[w.i+6] = firstChunkType + } else { + w.buf[w.i+6] = middleChunkType + } + } + binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value()) + binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) +} + +// writeBlock writes the buffered block to the underlying writer, and reserves +// space for the next chunk's header. +func (w *Writer) writeBlock() { + _, w.err = w.w.Write(w.buf[w.written:]) + w.i = 0 + w.j = headerSize + w.written = 0 +} + +// writePending finishes the current journal and writes the buffer to the +// underlying writer. +func (w *Writer) writePending() { + if w.err != nil { + return + } + if w.pending { + w.fillHeader(true) + w.pending = false + } + _, w.err = w.w.Write(w.buf[w.written:w.j]) + w.written = w.j +} + +// Close finishes the current journal and closes the writer. 
+func (w *Writer) Close() error { + w.seq++ + w.writePending() + if w.err != nil { + return w.err + } + w.err = errors.New("leveldb/journal: closed Writer") + return nil +} + +// Flush finishes the current journal, writes to the underlying writer, and +// flushes it if that writer implements interface{ Flush() error }. +func (w *Writer) Flush() error { + w.seq++ + w.writePending() + if w.err != nil { + return w.err + } + if w.f != nil { + w.err = w.f.Flush() + return w.err + } + return nil +} + +// Reset resets the journal writer, allows reuse of the journal writer. Reset +// will also closes the journal writer if not already. +func (w *Writer) Reset(writer io.Writer) (err error) { + w.seq++ + if w.err == nil { + w.writePending() + err = w.err + } + w.w = writer + w.f, _ = writer.(flusher) + w.i = 0 + w.j = 0 + w.written = 0 + w.first = false + w.pending = false + w.err = nil + return +} + +// Next returns a writer for the next journal. The writer returned becomes stale +// after the next Close, Flush or Next call, and should no longer be used. +func (w *Writer) Next() (io.Writer, error) { + w.seq++ + if w.err != nil { + return nil, w.err + } + if w.pending { + w.fillHeader(true) + } + w.i = w.j + w.j = w.j + headerSize + // Check if there is room in the block for the header. + if w.j > blockSize { + // Fill in the rest of the block with zeroes. + for k := w.i; k < blockSize; k++ { + w.buf[k] = 0 + } + w.writeBlock() + if w.err != nil { + return nil, w.err + } + } + w.first = true + w.pending = true + return singleWriter{w, w.seq}, nil +} + +type singleWriter struct { + w *Writer + seq int +} + +func (x singleWriter) Write(p []byte) (int, error) { + w := x.w + if w.seq != x.seq { + return 0, errors.New("leveldb/journal: stale writer") + } + if w.err != nil { + return 0, w.err + } + n0 := len(p) + for len(p) > 0 { + // Write a block, if it is full. + if w.j == blockSize { + w.fillHeader(false) + w.writeBlock() + if w.err != nil { + return 0, w.err + } + w.first = false + } + // Copy bytes into the buffer. + n := copy(w.buf[w.j:], p) + w.j += n + p = p[n:] + } + return n0, nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go new file mode 100644 index 000000000..0fcf22599 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go @@ -0,0 +1,818 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135 +// License, authors and contributors informations can be found at bellow URLs respectively: +// https://code.google.com/p/leveldb-go/source/browse/LICENSE +// https://code.google.com/p/leveldb-go/source/browse/AUTHORS +// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS + +package journal + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math/rand" + "strings" + "testing" +) + +type dropper struct { + t *testing.T +} + +func (d dropper) Drop(err error) { + d.t.Log(err) +} + +func short(s string) string { + if len(s) < 64 { + return s + } + return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:]) +} + +// big returns a string of length n, composed of repetitions of partial. 
+func big(partial string, n int) string { + return strings.Repeat(partial, n/len(partial)+1)[:n] +} + +func TestEmpty(t *testing.T) { + buf := new(bytes.Buffer) + r := NewReader(buf, dropper{t}, true, true) + if _, err := r.Next(); err != io.EOF { + t.Fatalf("got %v, want %v", err, io.EOF) + } +} + +func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) { + buf := new(bytes.Buffer) + + reset() + w := NewWriter(buf) + for { + s, ok := gen() + if !ok { + break + } + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write([]byte(s)); err != nil { + t.Fatal(err) + } + } + if err := w.Close(); err != nil { + t.Fatal(err) + } + + reset() + r := NewReader(buf, dropper{t}, true, true) + for { + s, ok := gen() + if !ok { + break + } + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + x, err := ioutil.ReadAll(rr) + if err != nil { + t.Fatal(err) + } + if string(x) != s { + t.Fatalf("got %q, want %q", short(string(x)), short(s)) + } + } + if _, err := r.Next(); err != io.EOF { + t.Fatalf("got %v, want %v", err, io.EOF) + } +} + +func testLiterals(t *testing.T, s []string) { + var i int + reset := func() { + i = 0 + } + gen := func() (string, bool) { + if i == len(s) { + return "", false + } + i++ + return s[i-1], true + } + testGenerator(t, reset, gen) +} + +func TestMany(t *testing.T) { + const n = 1e5 + var i int + reset := func() { + i = 0 + } + gen := func() (string, bool) { + if i == n { + return "", false + } + i++ + return fmt.Sprintf("%d.", i-1), true + } + testGenerator(t, reset, gen) +} + +func TestRandom(t *testing.T) { + const n = 1e2 + var ( + i int + r *rand.Rand + ) + reset := func() { + i, r = 0, rand.New(rand.NewSource(0)) + } + gen := func() (string, bool) { + if i == n { + return "", false + } + i++ + return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true + } + testGenerator(t, reset, gen) +} + +func TestBasic(t *testing.T) { + testLiterals(t, []string{ + strings.Repeat("a", 1000), + strings.Repeat("b", 97270), + strings.Repeat("c", 8000), + }) +} + +func TestBoundary(t *testing.T) { + for i := blockSize - 16; i < blockSize+16; i++ { + s0 := big("abcd", i) + for j := blockSize - 16; j < blockSize+16; j++ { + s1 := big("ABCDE", j) + testLiterals(t, []string{s0, s1}) + testLiterals(t, []string{s0, "", s1}) + testLiterals(t, []string{s0, "x", s1}) + } + } +} + +func TestFlush(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriter(buf) + // Write a couple of records. Everything should still be held + // in the record.Writer buffer, so that buf.Len should be 0. + w0, _ := w.Next() + w0.Write([]byte("0")) + w1, _ := w.Next() + w1.Write([]byte("11")) + if got, want := buf.Len(), 0; got != want { + t.Fatalf("buffer length #0: got %d want %d", got, want) + } + // Flush the record.Writer buffer, which should yield 17 bytes. + // 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes. + if err := w.Flush(); err != nil { + t.Fatal(err) + } + if got, want := buf.Len(), 17; got != want { + t.Fatalf("buffer length #1: got %d want %d", got, want) + } + // Do another write, one that isn't large enough to complete the block. + // The write should not have flowed through to buf. + w2, _ := w.Next() + w2.Write(bytes.Repeat([]byte("2"), 10000)) + if got, want := buf.Len(), 17; got != want { + t.Fatalf("buffer length #2: got %d want %d", got, want) + } + // Flushing should get us up to 10024 bytes written. + // 10024 = 17 + 7 + 10000. 
+ if err := w.Flush(); err != nil { + t.Fatal(err) + } + if got, want := buf.Len(), 10024; got != want { + t.Fatalf("buffer length #3: got %d want %d", got, want) + } + // Do a bigger write, one that completes the current block. + // We should now have 32768 bytes (a complete block), without + // an explicit flush. + w3, _ := w.Next() + w3.Write(bytes.Repeat([]byte("3"), 40000)) + if got, want := buf.Len(), 32768; got != want { + t.Fatalf("buffer length #4: got %d want %d", got, want) + } + // Flushing should get us up to 50038 bytes written. + // 50038 = 10024 + 2*7 + 40000. There are two headers because + // the one record was split into two chunks. + if err := w.Flush(); err != nil { + t.Fatal(err) + } + if got, want := buf.Len(), 50038; got != want { + t.Fatalf("buffer length #5: got %d want %d", got, want) + } + // Check that reading those records give the right lengths. + r := NewReader(buf, dropper{t}, true, true) + wants := []int64{1, 2, 10000, 40000} + for i, want := range wants { + rr, _ := r.Next() + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #%d: %v", i, err) + } + if n != want { + t.Fatalf("read #%d: got %d bytes want %d", i, n, want) + } + } +} + +func TestNonExhaustiveRead(t *testing.T) { + const n = 100 + buf := new(bytes.Buffer) + p := make([]byte, 10) + rnd := rand.New(rand.NewSource(1)) + + w := NewWriter(buf) + for i := 0; i < n; i++ { + length := len(p) + rnd.Intn(3*blockSize) + s := string(uint8(i)) + "123456789abcdefgh" + ww, _ := w.Next() + ww.Write([]byte(big(s, length))) + } + if err := w.Close(); err != nil { + t.Fatal(err) + } + + r := NewReader(buf, dropper{t}, true, true) + for i := 0; i < n; i++ { + rr, _ := r.Next() + _, err := io.ReadFull(rr, p) + if err != nil { + t.Fatal(err) + } + want := string(uint8(i)) + "123456789" + if got := string(p); got != want { + t.Fatalf("read #%d: got %q want %q", i, got, want) + } + } +} + +func TestStaleReader(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w0, err := w.Next() + if err != nil { + t.Fatal(err) + } + w0.Write([]byte("0")) + w1, err := w.Next() + if err != nil { + t.Fatal(err) + } + w1.Write([]byte("11")) + if err := w.Close(); err != nil { + t.Fatal(err) + } + + r := NewReader(buf, dropper{t}, true, true) + r0, err := r.Next() + if err != nil { + t.Fatal(err) + } + r1, err := r.Next() + if err != nil { + t.Fatal(err) + } + p := make([]byte, 1) + if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") { + t.Fatalf("stale read #0: unexpected error: %v", err) + } + if _, err := r1.Read(p); err != nil { + t.Fatalf("fresh read #1: got %v want nil error", err) + } + if p[0] != '1' { + t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0]) + } +} + +func TestStaleWriter(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w0, err := w.Next() + if err != nil { + t.Fatal(err) + } + w1, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { + t.Fatalf("stale write #0: unexpected error: %v", err) + } + if _, err := w1.Write([]byte("11")); err != nil { + t.Fatalf("fresh write #1: got %v want nil error", err) + } + if err := w.Flush(); err != nil { + t.Fatalf("flush: %v", err) + } + if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { + t.Fatalf("stale write #1: unexpected error: %v", err) + } +} + +func TestCorrupt_MissingLastBlock(t *testing.T) { + buf := new(bytes.Buffer) + + w := 
NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + // Cut the last block. + b := buf.Bytes()[:blockSize] + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read. + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if n != blockSize-1024 { + t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024) + } + + // Second read. + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != io.ErrUnexpectedEOF { + t.Fatalf("read #1: unexpected error: %v", err) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_CorruptedFirstBlock(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + // Fourth record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { + t.Fatalf("write #3: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting block #0. + for i := 0; i < 1024; i++ { + b[i] = '1' + } + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (third record). + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize-headerSize) + 1; n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (fourth record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #1: %v", err) + } + if want := int64(blockSize-headerSize) + 2; n != want { + t.Fatalf("read #1: got %d bytes want %d", n, want) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_CorruptedMiddleBlock(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. 
+ ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + // Fourth record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { + t.Fatalf("write #3: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting block #1. + for i := 0; i < 1024; i++ { + b[blockSize+i] = '1' + } + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (first record). + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize / 2); n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (second record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != io.ErrUnexpectedEOF { + t.Fatalf("read #1: unexpected error: %v", err) + } + + // Third read (fourth record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #2: %v", err) + } + if want := int64(blockSize-headerSize) + 2; n != want { + t.Fatalf("read #2: got %d bytes want %d", n, want) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_CorruptedLastBlock(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + // Fourth record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { + t.Fatalf("write #3: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting block #3. + for i := len(b) - 1; i > len(b)-1024; i-- { + b[i] = '1' + } + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (first record). + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize / 2); n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (second record). 
+ rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #1: %v", err) + } + if want := int64(blockSize - headerSize); n != want { + t.Fatalf("read #1: got %d bytes want %d", n, want) + } + + // Third read (third record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #2: %v", err) + } + if want := int64(blockSize-headerSize) + 1; n != want { + t.Fatalf("read #2: got %d bytes want %d", n, want) + } + + // Fourth read (fourth record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != io.ErrUnexpectedEOF { + t.Fatalf("read #3: unexpected error: %v", err) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting record #1. + x := blockSize + binary.LittleEndian.PutUint16(b[x+4:], 0xffff) + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (first record). + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize / 2); n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (second record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != io.ErrUnexpectedEOF { + t.Fatalf("read #1: unexpected error: %v", err) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting record #1. + x := blockSize/2 + headerSize + binary.LittleEndian.PutUint16(b[x+4:], 0xffff) + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (first record). 
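+ // (In this journal format each chunk carries a 7-byte header: a 4-byte
+ // checksum, a 2-byte little-endian length and a 1-byte chunk type. The
+ // PutUint16 at offset x+4 above therefore inflates the second record's
+ // stored chunk length past its block; the reader is expected to drop
+ // that chunk and resynchronize, which is why the reads below see only
+ // the first and third records.)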
+ rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize / 2); n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (third record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #1: %v", err) + } + if want := int64(blockSize-headerSize) + 1; n != want { + t.Fatalf("read #1: got %d bytes want %d", n, want) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go new file mode 100644 index 000000000..b9acf932d --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go @@ -0,0 +1,139 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "encoding/binary" + "fmt" +) + +type vType int + +func (t vType) String() string { + switch t { + case tDel: + return "d" + case tVal: + return "v" + } + return "x" +} + +// Value types encoded as the last component of internal keys. +// Don't modify; this value are saved to disk. +const ( + tDel vType = iota + tVal +) + +// tSeek defines the vType that should be passed when constructing an +// internal key for seeking to a particular sequence number (since we +// sort sequence numbers in decreasing order and the value type is +// embedded as the low 8 bits in the sequence number in internal keys, +// we need to use the highest-numbered ValueType, not the lowest). +const tSeek = tVal + +const ( + // Maximum value possible for sequence number; the 8-bits are + // used by value type, so its can packed together in single + // 64-bit integer. + kMaxSeq uint64 = (uint64(1) << 56) - 1 + // Maximum value possible for packed sequence number and type. + kMaxNum uint64 = (kMaxSeq << 8) | uint64(tSeek) +) + +// Maximum number encoded in bytes. 
+var kMaxNumBytes = make([]byte, 8) + +func init() { + binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum) +} + +type iKey []byte + +func newIKey(ukey []byte, seq uint64, t vType) iKey { + if seq > kMaxSeq || t > tVal { + panic("invalid seq number or value type") + } + + b := make(iKey, len(ukey)+8) + copy(b, ukey) + binary.LittleEndian.PutUint64(b[len(ukey):], (seq<<8)|uint64(t)) + return b +} + +func parseIkey(p []byte) (ukey []byte, seq uint64, t vType, ok bool) { + if len(p) < 8 { + return + } + num := binary.LittleEndian.Uint64(p[len(p)-8:]) + seq, t = uint64(num>>8), vType(num&0xff) + if t > tVal { + return + } + ukey = p[:len(p)-8] + ok = true + return +} + +func validIkey(p []byte) bool { + _, _, _, ok := parseIkey(p) + return ok +} + +func (p iKey) assert() { + if p == nil { + panic("nil iKey") + } + if len(p) < 8 { + panic(fmt.Sprintf("invalid iKey %q, len=%d", []byte(p), len(p))) + } +} + +func (p iKey) ok() bool { + if len(p) < 8 { + return false + } + _, _, ok := p.parseNum() + return ok +} + +func (p iKey) ukey() []byte { + p.assert() + return p[:len(p)-8] +} + +func (p iKey) num() uint64 { + p.assert() + return binary.LittleEndian.Uint64(p[len(p)-8:]) +} + +func (p iKey) parseNum() (seq uint64, t vType, ok bool) { + if p == nil { + panic("nil iKey") + } + if len(p) < 8 { + return + } + num := p.num() + seq, t = uint64(num>>8), vType(num&0xff) + if t > tVal { + return 0, 0, false + } + ok = true + return +} + +func (p iKey) String() string { + if len(p) == 0 { + return "" + } + if seq, t, ok := p.parseNum(); ok { + return fmt.Sprintf("%s,%s%d", shorten(string(p.ukey())), t, seq) + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go new file mode 100644 index 000000000..2b055ecfa --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go @@ -0,0 +1,123 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
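The iKey helpers above pack the sequence number and value type into a single little-endian uint64 trailer appended to the user key (sequence in the high 56 bits, type in the low 8). A minimal standalone sketch of that packing, using only the layout shown above; the helper names and the main wrapper are illustrative, not part of the library:

package main

import (
	"encoding/binary"
	"fmt"
)

// packTrailer mirrors newIKey: append (seq<<8)|type as a little-endian uint64.
func packTrailer(ukey []byte, seq, vt uint64) []byte {
	b := make([]byte, len(ukey)+8)
	copy(b, ukey)
	binary.LittleEndian.PutUint64(b[len(ukey):], (seq<<8)|vt)
	return b
}

// unpackTrailer mirrors parseIkey: the low 8 bits are the value type,
// the remaining 56 bits are the sequence number.
func unpackTrailer(ik []byte) (ukey []byte, seq, vt uint64) {
	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
	return ik[:len(ik)-8], num >> 8, num & 0xff
}

func main() {
	ik := packTrailer([]byte("foo"), 100, 1) // 1 corresponds to tVal above
	ukey, seq, vt := unpackTrailer(ik)
	fmt.Printf("%s %d %d\n", ukey, seq, vt) // foo 100 1
}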
+ +package leveldb + +import ( + "bytes" + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" +) + +var defaultIComparer = &iComparer{comparer.DefaultComparer} + +func ikey(key string, seq uint64, t vType) iKey { + return newIKey([]byte(key), uint64(seq), t) +} + +func shortSep(a, b []byte) []byte { + dst := make([]byte, len(a)) + dst = defaultIComparer.Separator(dst[:0], a, b) + if dst == nil { + return a + } + return dst +} + +func shortSuccessor(b []byte) []byte { + dst := make([]byte, len(b)) + dst = defaultIComparer.Successor(dst[:0], b) + if dst == nil { + return b + } + return dst +} + +func testSingleKey(t *testing.T, key string, seq uint64, vt vType) { + ik := ikey(key, seq, vt) + + if !bytes.Equal(ik.ukey(), []byte(key)) { + t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) + } + + if rseq, rt, ok := ik.parseNum(); ok { + if rseq != seq { + t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) + } + + if rt != vt { + t.Errorf("type does not equal, got %v, want %v", rt, vt) + } + } else { + t.Error("cannot parse seq and type") + } +} + +func TestIKey_EncodeDecode(t *testing.T) { + keys := []string{"", "k", "hello", "longggggggggggggggggggggg"} + seqs := []uint64{ + 1, 2, 3, + (1 << 8) - 1, 1 << 8, (1 << 8) + 1, + (1 << 16) - 1, 1 << 16, (1 << 16) + 1, + (1 << 32) - 1, 1 << 32, (1 << 32) + 1, + } + for _, key := range keys { + for _, seq := range seqs { + testSingleKey(t, key, seq, tVal) + testSingleKey(t, "hello", 1, tDel) + } + } +} + +func assertBytes(t *testing.T, want, got []byte) { + if !bytes.Equal(got, want) { + t.Errorf("assert failed, got %v, want %v", got, want) + } +} + +func TestIKeyShortSeparator(t *testing.T) { + // When user keys are same + assertBytes(t, ikey("foo", 100, tVal), + shortSep(ikey("foo", 100, tVal), + ikey("foo", 99, tVal))) + assertBytes(t, ikey("foo", 100, tVal), + shortSep(ikey("foo", 100, tVal), + ikey("foo", 101, tVal))) + assertBytes(t, ikey("foo", 100, tVal), + shortSep(ikey("foo", 100, tVal), + ikey("foo", 100, tVal))) + assertBytes(t, ikey("foo", 100, tVal), + shortSep(ikey("foo", 100, tVal), + ikey("foo", 100, tDel))) + + // When user keys are misordered + assertBytes(t, ikey("foo", 100, tVal), + shortSep(ikey("foo", 100, tVal), + ikey("bar", 99, tVal))) + + // When user keys are different, but correctly ordered + assertBytes(t, ikey("g", uint64(kMaxSeq), tSeek), + shortSep(ikey("foo", 100, tVal), + ikey("hello", 200, tVal))) + + // When start user key is prefix of limit user key + assertBytes(t, ikey("foo", 100, tVal), + shortSep(ikey("foo", 100, tVal), + ikey("foobar", 200, tVal))) + + // When limit user key is prefix of start user key + assertBytes(t, ikey("foobar", 100, tVal), + shortSep(ikey("foobar", 100, tVal), + ikey("foo", 200, tVal))) +} + +func TestIKeyShortestSuccessor(t *testing.T) { + assertBytes(t, ikey("g", uint64(kMaxSeq), tSeek), + shortSuccessor(ikey("foo", 100, tVal))) + assertBytes(t, ikey("\xff\xff", 100, tVal), + shortSuccessor(ikey("\xff\xff", 100, tVal))) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go new file mode 100644 index 000000000..797a42c6c --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go @@ -0,0 +1,20 @@ +package leveldb + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +func TestLeveldb(t *testing.T) { + testutil.RunDefer() + + RegisterFailHandler(Fail) + RunSpecs(t, "Leveldb Suite") + + RegisterTestingT(t) + testutil.RunDefer("teardown") +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go new file mode 100644 index 000000000..47fe82cf6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go @@ -0,0 +1,75 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package memdb + +import ( + "encoding/binary" + "math/rand" + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" +) + +func BenchmarkPut(b *testing.B) { + buf := make([][4]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) + } + + b.ResetTimer() + p := New(comparer.DefaultComparer, 0) + for i := range buf { + p.Put(buf[i][:], nil) + } +} + +func BenchmarkPutRandom(b *testing.B) { + buf := make([][4]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int())) + } + + b.ResetTimer() + p := New(comparer.DefaultComparer, 0) + for i := range buf { + p.Put(buf[i][:], nil) + } +} + +func BenchmarkGet(b *testing.B) { + buf := make([][4]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) + } + + p := New(comparer.DefaultComparer, 0) + for i := range buf { + p.Put(buf[i][:], nil) + } + + b.ResetTimer() + for i := range buf { + p.Get(buf[i][:]) + } +} + +func BenchmarkGetRandom(b *testing.B) { + buf := make([][4]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) + } + + p := New(comparer.DefaultComparer, 0) + for i := range buf { + p.Put(buf[i][:], nil) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + p.Get(buf[rand.Int()%b.N][:]) + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go new file mode 100644 index 000000000..7f63810f4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go @@ -0,0 +1,452 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package memdb provides in-memory key/value database implementation. 
+package memdb + +import ( + "math/rand" + "sync" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + ErrNotFound = util.ErrNotFound +) + +const tMaxHeight = 12 + +type dbIter struct { + util.BasicReleaser + p *DB + slice *util.Range + node int + forward bool + key, value []byte +} + +func (i *dbIter) fill(checkStart, checkLimit bool) bool { + if i.node != 0 { + n := i.p.nodeData[i.node] + m := n + i.p.nodeData[i.node+nKey] + i.key = i.p.kvData[n:m] + if i.slice != nil { + switch { + case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0: + fallthrough + case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0: + i.node = 0 + goto bail + } + } + i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]] + return true + } +bail: + i.key = nil + i.value = nil + return false +} + +func (i *dbIter) Valid() bool { + return i.node != 0 +} + +func (i *dbIter) First() bool { + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Start != nil { + i.node, _ = i.p.findGE(i.slice.Start, false) + } else { + i.node = i.p.nodeData[nNext] + } + return i.fill(false, true) +} + +func (i *dbIter) Last() bool { + if i.p == nil { + return false + } + i.forward = false + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Limit != nil { + i.node = i.p.findLT(i.slice.Limit) + } else { + i.node = i.p.findLast() + } + return i.fill(true, false) +} + +func (i *dbIter) Seek(key []byte) bool { + if i.p == nil { + return false + } + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 { + key = i.slice.Start + } + i.node, _ = i.p.findGE(key, false) + return i.fill(false, true) +} + +func (i *dbIter) Next() bool { + if i.p == nil { + return false + } + if i.node == 0 { + if !i.forward { + return i.First() + } + return false + } + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + i.node = i.p.nodeData[i.node+nNext] + return i.fill(false, true) +} + +func (i *dbIter) Prev() bool { + if i.p == nil { + return false + } + if i.node == 0 { + if i.forward { + return i.Last() + } + return false + } + i.forward = false + i.p.mu.RLock() + defer i.p.mu.RUnlock() + i.node = i.p.findLT(i.key) + return i.fill(true, false) +} + +func (i *dbIter) Key() []byte { + return i.key +} + +func (i *dbIter) Value() []byte { + return i.value +} + +func (i *dbIter) Error() error { return nil } + +func (i *dbIter) Release() { + if i.p != nil { + i.p = nil + i.node = 0 + i.key = nil + i.value = nil + i.BasicReleaser.Release() + } +} + +const ( + nKV = iota + nKey + nVal + nHeight + nNext +) + +// DB is an in-memory key/value database. 
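+// Internally the DB is a randomized skip list (up to tMaxHeight levels,
+// branching factor 4) laid out in two flat slices: kvData holds the raw
+// key and value bytes, while nodeData holds, for each node, the kv
+// offset, key length, value length, height and next-node links.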
+type DB struct { + cmp comparer.BasicComparer + rnd *rand.Rand + + mu sync.RWMutex + kvData []byte + // Node data: + // [0] : KV offset + // [1] : Key length + // [2] : Value length + // [3] : Height + // [3..height] : Next nodes + nodeData []int + prevNode [tMaxHeight]int + maxHeight int + n int + kvSize int +} + +func (p *DB) randHeight() (h int) { + const branching = 4 + h = 1 + for h < tMaxHeight && p.rnd.Int()%branching == 0 { + h++ + } + return +} + +func (p *DB) findGE(key []byte, prev bool) (int, bool) { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + cmp := 1 + if next != 0 { + o := p.nodeData[next] + cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) + } + if cmp < 0 { + // Keep searching in this list + node = next + } else { + if prev { + p.prevNode[h] = node + } else if cmp == 0 { + return next, true + } + if h == 0 { + return next, cmp == 0 + } + h-- + } + } +} + +func (p *DB) findLT(key []byte) int { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + o := p.nodeData[next] + if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 { + if h == 0 { + break + } + h-- + } else { + node = next + } + } + return node +} + +func (p *DB) findLast() int { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + if next == 0 { + if h == 0 { + break + } + h-- + } else { + node = next + } + } + return node +} + +// Put sets the value for the given key. It overwrites any previous value +// for that key; a DB is not a multi-map. +// +// It is safe to modify the contents of the arguments after Put returns. +func (p *DB) Put(key []byte, value []byte) error { + p.mu.Lock() + defer p.mu.Unlock() + + if node, exact := p.findGE(key, true); exact { + kvOffset := len(p.kvData) + p.kvData = append(p.kvData, key...) + p.kvData = append(p.kvData, value...) + p.nodeData[node] = kvOffset + m := p.nodeData[node+nVal] + p.nodeData[node+nVal] = len(value) + p.kvSize += len(value) - m + return nil + } + + h := p.randHeight() + if h > p.maxHeight { + for i := p.maxHeight; i < h; i++ { + p.prevNode[i] = 0 + } + p.maxHeight = h + } + + kvOffset := len(p.kvData) + p.kvData = append(p.kvData, key...) + p.kvData = append(p.kvData, value...) + // Node + node := len(p.nodeData) + p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h) + for i, n := range p.prevNode[:h] { + m := n + 4 + i + p.nodeData = append(p.nodeData, p.nodeData[m]) + p.nodeData[m] = node + } + + p.kvSize += len(key) + len(value) + p.n++ + return nil +} + +// Delete deletes the value for the given key. It returns ErrNotFound if +// the DB does not contain the key. +// +// It is safe to modify the contents of the arguments after Delete returns. +func (p *DB) Delete(key []byte) error { + p.mu.Lock() + defer p.mu.Unlock() + + node, exact := p.findGE(key, true) + if !exact { + return ErrNotFound + } + + h := p.nodeData[node+nHeight] + for i, n := range p.prevNode[:h] { + m := n + 4 + i + p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] + } + + p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal] + p.n-- + return nil +} + +// Contains returns true if the given key are in the DB. +// +// It is safe to modify the contents of the arguments after Contains returns. +func (p *DB) Contains(key []byte) bool { + p.mu.RLock() + _, exact := p.findGE(key, false) + p.mu.RUnlock() + return exact +} + +// Get gets the value for the given key. It returns error.ErrNotFound if the +// DB does not contain the key. 
+// +// The caller should not modify the contents of the returned slice, but +// it is safe to modify the contents of the argument after Get returns. +func (p *DB) Get(key []byte) (value []byte, err error) { + p.mu.RLock() + if node, exact := p.findGE(key, false); exact { + o := p.nodeData[node] + p.nodeData[node+nKey] + value = p.kvData[o : o+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +// Find finds key/value pair whose key is greater than or equal to the +// given key. It returns ErrNotFound if the table doesn't contain +// such pair. +// +// The caller should not modify the contents of the returned slice, but +// it is safe to modify the contents of the argument after Find returns. +func (p *DB) Find(key []byte) (rkey, value []byte, err error) { + p.mu.RLock() + if node, _ := p.findGE(key, false); node != 0 { + n := p.nodeData[node] + m := n + p.nodeData[node+nKey] + rkey = p.kvData[n:m] + value = p.kvData[m : m+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +// NewIterator returns an iterator of the DB. +// The returned iterator is not goroutine-safe, but it is safe to use +// multiple iterators concurrently, with each in a dedicated goroutine. +// It is also safe to use an iterator concurrently with modifying its +// underlying DB. However, the resultant key/value pairs are not guaranteed +// to be a consistent snapshot of the DB at a particular point in time. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// DB. And a nil Range.Limit is treated as a key after all keys in +// the DB. +// +// The iterator must be released after use, by calling Release method. +// +// Also read Iterator documentation of the leveldb/iterator package. +func (p *DB) NewIterator(slice *util.Range) iterator.Iterator { + return &dbIter{p: p, slice: slice} +} + +// Capacity returns keys/values buffer capacity. +func (p *DB) Capacity() int { + p.mu.RLock() + defer p.mu.RUnlock() + return cap(p.kvData) +} + +// Size returns sum of keys and values length. Note that deleted +// key/value will not be accouted for, but it will still consume +// the buffer, since the buffer is append only. +func (p *DB) Size() int { + p.mu.RLock() + defer p.mu.RUnlock() + return p.kvSize +} + +// Free returns keys/values free buffer before need to grow. +func (p *DB) Free() int { + p.mu.RLock() + defer p.mu.RUnlock() + return cap(p.kvData) - len(p.kvData) +} + +// Len returns the number of entries in the DB. +func (p *DB) Len() int { + p.mu.RLock() + defer p.mu.RUnlock() + return p.n +} + +// Reset resets the DB to initial empty state. Allows reuse the buffer. +func (p *DB) Reset() { + p.rnd = rand.New(rand.NewSource(0xdeadbeef)) + p.maxHeight = 1 + p.n = 0 + p.kvSize = 0 + p.kvData = p.kvData[:0] + p.nodeData = p.nodeData[:4+tMaxHeight] + p.nodeData[nKV] = 0 + p.nodeData[nKey] = 0 + p.nodeData[nVal] = 0 + p.nodeData[nHeight] = tMaxHeight + for n := 0; n < tMaxHeight; n++ { + p.nodeData[4+n] = 0 + p.prevNode[n] = 0 + } +} + +// New creates a new initalized in-memory key/value DB. The capacity +// is the initial key/value buffer capacity. The capacity is advisory, +// not enforced. +// +// The returned DB instance is goroutine-safe. 
+func New(cmp comparer.BasicComparer, capacity int) *DB { + p := &DB{ + cmp: cmp, + rnd: rand.New(rand.NewSource(0xdeadbeef)), + maxHeight: 1, + kvData: make([]byte, 0, capacity), + nodeData: make([]int, 4+tMaxHeight), + } + p.nodeData[nHeight] = tMaxHeight + return p +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go new file mode 100644 index 000000000..171289ecc --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go @@ -0,0 +1,17 @@ +package memdb + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +func TestMemdb(t *testing.T) { + testutil.RunDefer() + + RegisterFailHandler(Fail) + RunSpecs(t, "Memdb Suite") +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go new file mode 100644 index 000000000..d9542e9fb --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go @@ -0,0 +1,135 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package memdb + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) { + p.mu.RLock() + if node := p.findLT(key); node != 0 { + n := p.nodeData[node] + m := n + p.nodeData[node+nKey] + rkey = p.kvData[n:m] + value = p.kvData[m : m+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +func (p *DB) TestFindLast() (rkey, value []byte, err error) { + p.mu.RLock() + if node := p.findLast(); node != 0 { + n := p.nodeData[node] + m := n + p.nodeData[node+nKey] + rkey = p.kvData[n:m] + value = p.kvData[m : m+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +func (p *DB) TestPut(key []byte, value []byte) error { + p.Put(key, value) + return nil +} + +func (p *DB) TestDelete(key []byte) error { + p.Delete(key) + return nil +} + +func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) { + return p.Find(key) +} + +func (p *DB) TestGet(key []byte) (value []byte, err error) { + return p.Get(key) +} + +func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator { + return p.NewIterator(slice) +} + +var _ = testutil.Defer(func() { + Describe("Memdb", func() { + Describe("write test", func() { + It("should do write correctly", func() { + db := New(comparer.DefaultComparer, 0) + t := testutil.DBTesting{ + DB: db, + Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 30, 5, 5).Clone(), + PostFn: func(t *testutil.DBTesting) { + Expect(db.Len()).Should(Equal(t.Present.Len())) + Expect(db.Size()).Should(Equal(t.Present.Size())) + switch t.Act { + case testutil.DBPut, testutil.DBOverwrite: + 
Expect(db.Contains(t.ActKey)).Should(BeTrue()) + default: + Expect(db.Contains(t.ActKey)).Should(BeFalse()) + } + }, + } + testutil.DoDBTesting(&t) + }) + }) + + Describe("read test", func() { + testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB { + // Building the DB. + db := New(comparer.DefaultComparer, 0) + kv.IterateShuffled(nil, func(i int, key, value []byte) { + db.Put(key, value) + }) + + if kv.Len() > 1 { + It("Should find correct keys with findLT", func() { + testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) { + key_, key, _ := kv.IndexInexact(i + 1) + expectedKey, expectedValue := kv.Index(i) + + // Using key that exist. + rkey, rvalue, err := db.TestFindLT(key) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey) + Expect(rkey).Should(Equal(expectedKey), "Key") + Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey) + + // Using key that doesn't exist. + rkey, rvalue, err = db.TestFindLT(key_) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey) + Expect(rkey).Should(Equal(expectedKey)) + Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey) + }) + }) + } + + if kv.Len() > 0 { + It("Should find last key with findLast", func() { + key, value := kv.Index(kv.Len() - 1) + rkey, rvalue, err := db.TestFindLast() + Expect(err).ShouldNot(HaveOccurred()) + Expect(rkey).Should(Equal(key)) + Expect(rvalue).Should(Equal(value)) + }) + } + + return db + }) + }) + }) +}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go new file mode 100644 index 000000000..c2c474a8f --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go @@ -0,0 +1,326 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package opt provides sets of options used by LevelDB. +package opt + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" +) + +const ( + KiB = 1024 + MiB = KiB * 1024 + GiB = MiB * 1024 +) + +const ( + DefaultBlockCacheSize = 8 * MiB + DefaultBlockRestartInterval = 16 + DefaultBlockSize = 4 * KiB + DefaultCompressionType = SnappyCompression + DefaultCachedOpenFiles = 500 + DefaultWriteBuffer = 4 * MiB +) + +type noCache struct{} + +func (noCache) SetCapacity(capacity int) {} +func (noCache) Capacity() int { return 0 } +func (noCache) Used() int { return 0 } +func (noCache) Size() int { return 0 } +func (noCache) NumObjects() int { return 0 } +func (noCache) GetNamespace(id uint64) cache.Namespace { return nil } +func (noCache) PurgeNamespace(id uint64, fin cache.PurgeFin) {} +func (noCache) ZapNamespace(id uint64) {} +func (noCache) Purge(fin cache.PurgeFin) {} +func (noCache) Zap() {} + +var NoCache cache.Cache = noCache{} + +// Compression is the per-block compression algorithm to use. 
+type Compression uint + +func (c Compression) String() string { + switch c { + case DefaultCompression: + return "default" + case NoCompression: + return "none" + case SnappyCompression: + return "snappy" + } + return "invalid" +} + +const ( + DefaultCompression Compression = iota + NoCompression + SnappyCompression + nCompression +) + +// Strict is the DB strict level. +type Strict uint + +const ( + // If present then a corrupted or invalid chunk or block in manifest + // journal will cause an error istead of being dropped. + StrictManifest Strict = 1 << iota + + // If present then a corrupted or invalid chunk or block in journal + // will cause an error istead of being dropped. + StrictJournal + + // If present then journal chunk checksum will be verified. + StrictJournalChecksum + + // If present then an invalid key/value pair will cause an error + // instead of being skipped. + StrictIterator + + // If present then 'sorted table' block checksum will be verified. + StrictBlockChecksum + + // StrictAll enables all strict flags. + StrictAll = StrictManifest | StrictJournal | StrictJournalChecksum | StrictIterator | StrictBlockChecksum + + // DefaultStrict is the default strict flags. Specify any strict flags + // will override default strict flags as whole (i.e. not OR'ed). + DefaultStrict = StrictJournalChecksum | StrictBlockChecksum + + // NoStrict disables all strict flags. Override default strict flags. + NoStrict = ^StrictAll +) + +// Options holds the optional parameters for the DB at large. +type Options struct { + // AltFilters defines one or more 'alternative filters'. + // 'alternative filters' will be used during reads if a filter block + // does not match with the 'effective filter'. + // + // The default value is nil + AltFilters []filter.Filter + + // BlockCache provides per-block caching for LevelDB. Specify NoCache to + // disable block caching. + // + // By default LevelDB will create LRU-cache with capacity of 8MiB. + BlockCache cache.Cache + + // BlockRestartInterval is the number of keys between restart points for + // delta encoding of keys. + // + // The default value is 16. + BlockRestartInterval int + + // BlockSize is the minimum uncompressed size in bytes of each 'sorted table' + // block. + // + // The default value is 4KiB. + BlockSize int + + // CachedOpenFiles defines number of open files to kept around when not + // in-use, the counting includes still in-use files. + // Set this to negative value to disable caching. + // + // The default value is 500. + CachedOpenFiles int + + // Comparer defines a total ordering over the space of []byte keys: a 'less + // than' relationship. The same comparison algorithm must be used for reads + // and writes over the lifetime of the DB. + // + // The default value uses the same ordering as bytes.Compare. + Comparer comparer.Comparer + + // Compression defines the per-block compression to use. + // + // The default value (DefaultCompression) uses snappy compression. + Compression Compression + + // ErrorIfExist defines whether an error should returned if the DB already + // exist. + // + // The default value is false. + ErrorIfExist bool + + // ErrorIfMissing defines whether an error should returned if the DB is + // missing. If false then the database will be created if missing, otherwise + // an error will be returned. + // + // The default value is false. + ErrorIfMissing bool + + // Filter defines an 'effective filter' to use. An 'effective filter' + // if defined will be used to generate per-table filter block. 
+ // The filter name will be stored on disk. + // During reads LevelDB will try to find matching filter from + // 'effective filter' and 'alternative filters'. + // + // Filter can be changed after a DB has been created. It is recommended + // to put old filter to the 'alternative filters' to mitigate lack of + // filter during transition period. + // + // A filter is used to reduce disk reads when looking for a specific key. + // + // The default value is nil. + Filter filter.Filter + + // Strict defines the DB strict level. + Strict Strict + + // WriteBuffer defines maximum size of a 'memdb' before flushed to + // 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk + // unsorted journal. + // + // LevelDB may held up to two 'memdb' at the same time. + // + // The default value is 4MiB. + WriteBuffer int +} + +func (o *Options) GetAltFilters() []filter.Filter { + if o == nil { + return nil + } + return o.AltFilters +} + +func (o *Options) GetBlockCache() cache.Cache { + if o == nil { + return nil + } + return o.BlockCache +} + +func (o *Options) GetBlockRestartInterval() int { + if o == nil || o.BlockRestartInterval <= 0 { + return DefaultBlockRestartInterval + } + return o.BlockRestartInterval +} + +func (o *Options) GetBlockSize() int { + if o == nil || o.BlockSize <= 0 { + return DefaultBlockSize + } + return o.BlockSize +} + +func (o *Options) GetCachedOpenFiles() int { + if o == nil || o.CachedOpenFiles == 0 { + return DefaultCachedOpenFiles + } else if o.CachedOpenFiles < 0 { + return 0 + } + return o.CachedOpenFiles +} + +func (o *Options) GetComparer() comparer.Comparer { + if o == nil || o.Comparer == nil { + return comparer.DefaultComparer + } + return o.Comparer +} + +func (o *Options) GetCompression() Compression { + if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression { + return DefaultCompressionType + } + return o.Compression +} + +func (o *Options) GetErrorIfExist() bool { + if o == nil { + return false + } + return o.ErrorIfExist +} + +func (o *Options) GetErrorIfMissing() bool { + if o == nil { + return false + } + return o.ErrorIfMissing +} + +func (o *Options) GetFilter() filter.Filter { + if o == nil { + return nil + } + return o.Filter +} + +func (o *Options) GetStrict(strict Strict) bool { + if o == nil || o.Strict == 0 { + return DefaultStrict&strict != 0 + } + return o.Strict&strict != 0 +} + +func (o *Options) GetWriteBuffer() int { + if o == nil || o.WriteBuffer <= 0 { + return DefaultWriteBuffer + } + return o.WriteBuffer +} + +// ReadOptions holds the optional parameters for 'read operation'. The +// 'read operation' includes Get, Find and NewIterator. +type ReadOptions struct { + // DontFillCache defines whether block reads for this 'read operation' + // should be cached. If false then the block will be cached. This does + // not affects already cached block. + // + // The default value is false. + DontFillCache bool + + // Strict overrides global DB strict level. Only StrictIterator and + // StrictBlockChecksum that does have effects here. + Strict Strict +} + +func (ro *ReadOptions) GetDontFillCache() bool { + if ro == nil { + return false + } + return ro.DontFillCache +} + +func (ro *ReadOptions) GetStrict(strict Strict) bool { + if ro == nil { + return false + } + return ro.Strict&strict != 0 +} + +// WriteOptions holds the optional parameters for 'write operation'. The +// 'write operation' includes Write, Put and Delete. 
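All of the getters above are written to be safe on a nil *Options and fall back to the package defaults, which is how the rest of the library consumes them. An illustrative sketch (the import path is the one vendored by this patch; the output comments reflect the defaults defined above):

package main

import (
	"fmt"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	var o *opt.Options // nil: every getter returns its default
	fmt.Println(o.GetBlockSize())       // 4096 (DefaultBlockSize, 4 KiB)
	fmt.Println(o.GetWriteBuffer())     // 4194304 (DefaultWriteBuffer, 4 MiB)
	fmt.Println(o.GetCompression())     // snappy (DefaultCompressionType)
	fmt.Println(o.GetCachedOpenFiles()) // 500 (DefaultCachedOpenFiles)
}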
+type WriteOptions struct { + // Sync is whether to sync underlying writes from the OS buffer cache + // through to actual disk, if applicable. Setting Sync can result in + // slower writes. + // + // If false, and the machine crashes, then some recent writes may be lost. + // Note that if it is just the process that crashes (and the machine does + // not) then no writes will be lost. + // + // In other words, Sync being false has the same semantics as a write + // system call. Sync being true means write followed by fsync. + // + // The default value is false. + Sync bool +} + +func (wo *WriteOptions) GetSync() bool { + if wo == nil { + return false + } + return wo.Sync +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go new file mode 100644 index 000000000..9d3f05ccc --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go @@ -0,0 +1,41 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" +) + +func (s *session) setOptions(o *opt.Options) { + s.o = &opt.Options{} + if o != nil { + *s.o = *o + } + // Alternative filters. + if filters := o.GetAltFilters(); len(filters) > 0 { + s.o.AltFilters = make([]filter.Filter, len(filters)) + for i, filter := range filters { + s.o.AltFilters[i] = &iFilter{filter} + } + } + // Block cache. + switch o.GetBlockCache() { + case nil: + s.o.BlockCache = cache.NewLRUCache(opt.DefaultBlockCacheSize) + case opt.NoCache: + s.o.BlockCache = nil + } + // Comparer. + s.icmp = &iComparer{o.GetComparer()} + s.o.Comparer = s.icmp + // Filter. + if filter := o.GetFilter(); filter != nil { + s.o.Filter = &iFilter{filter} + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go new file mode 100644 index 000000000..94c05844f --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go @@ -0,0 +1,396 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "io" + "os" + "sync" + "sync/atomic" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// session represent a persistent database session. +type session struct { + // Need 64-bit alignment. 
+ stFileNum uint64 // current unused file number + stJournalNum uint64 // current journal file number; need external synchronization + stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb + stSeq uint64 // last mem compacted seq; need external synchronization + stTempFileNum uint64 + + stor storage.Storage + storLock util.Releaser + o *opt.Options + icmp *iComparer + tops *tOps + + manifest *journal.Writer + manifestWriter storage.Writer + manifestFile storage.File + + stCptrs [kNumLevels]iKey // compact pointers; need external synchronization + stVersion *version // current version + vmu sync.Mutex +} + +// Creates new initialized session instance. +func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { + if stor == nil { + return nil, os.ErrInvalid + } + storLock, err := stor.Lock() + if err != nil { + return + } + s = &session{ + stor: stor, + storLock: storLock, + } + s.setOptions(o) + s.tops = newTableOps(s, s.o.GetCachedOpenFiles()) + s.setVersion(&version{s: s}) + s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock D·DeletedEntry L·Level Q·SeqNum T·TimeElapsed") + return +} + +// Close session. +func (s *session) close() { + s.tops.close() + if bc := s.o.GetBlockCache(); bc != nil { + bc.Purge(nil) + } + if s.manifest != nil { + s.manifest.Close() + } + if s.manifestWriter != nil { + s.manifestWriter.Close() + } + s.manifest = nil + s.manifestWriter = nil + s.manifestFile = nil + s.stVersion = nil +} + +// Release session lock. +func (s *session) release() { + s.storLock.Release() +} + +// Create a new database session; need external synchronization. +func (s *session) create() error { + // create manifest + return s.newManifest(nil, nil) +} + +// Recover a database session; need external synchronization. +func (s *session) recover() (err error) { + defer func() { + if os.IsNotExist(err) { + // Don't return os.ErrNotExist if the underlying storage contains + // other files that belong to LevelDB. So the DB won't get trashed. 
+ if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 { + err = ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest file missing")} + } + } + }() + + file, err := s.stor.GetManifest() + if err != nil { + return + } + + reader, err := file.Open() + if err != nil { + return + } + defer reader.Close() + strict := s.o.GetStrict(opt.StrictManifest) + jr := journal.NewReader(reader, dropper{s, file}, strict, true) + + staging := s.version_NB().newStaging() + rec := &sessionRecord{} + for { + var r io.Reader + r, err = jr.Next() + if err != nil { + if err == io.EOF { + err = nil + break + } + return + } + + err = rec.decode(r) + if err == nil { + // save compact pointers + for _, r := range rec.compactionPointers { + s.stCptrs[r.level] = iKey(r.ikey) + } + // commit record to version staging + staging.commit(rec) + } else if strict { + return ErrCorrupted{Type: CorruptedManifest, Err: err} + } else { + s.logf("manifest error: %v (skipped)", err) + } + rec.resetCompactionPointers() + rec.resetAddedTables() + rec.resetDeletedTables() + } + + switch { + case !rec.has(recComparer): + return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing comparer name")} + case rec.comparer != s.icmp.uName(): + return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: comparer mismatch, " + "want '" + s.icmp.uName() + "', " + "got '" + rec.comparer + "'")} + case !rec.has(recNextNum): + return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing next file number")} + case !rec.has(recJournalNum): + return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing journal file number")} + case !rec.has(recSeq): + return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing seq number")} + } + + s.manifestFile = file + s.setVersion(staging.finish()) + s.setFileNum(rec.nextNum) + s.recordCommited(rec) + return nil +} + +// Commit session; need external synchronization. +func (s *session) commit(r *sessionRecord) (err error) { + // spawn new version based on current version + nv := s.version_NB().spawn(r) + + if s.manifest == nil { + // manifest journal writer not yet created, create one + err = s.newManifest(r, nv) + } else { + err = s.flushManifest(r) + } + + // finally, apply new version if no error rise + if err == nil { + s.setVersion(nv) + } + + return +} + +// Pick a compaction based on current state; need external synchronization. +func (s *session) pickCompaction() *compaction { + v := s.version_NB() + + var level int + var t0 tFiles + if v.cScore >= 1 { + level = v.cLevel + cptr := s.stCptrs[level] + tables := v.tables[level] + for _, t := range tables { + if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { + t0 = append(t0, t) + break + } + } + if len(t0) == 0 { + t0 = append(t0, tables[0]) + } + } else { + if p := atomic.LoadPointer(&v.cSeek); p != nil { + ts := (*tSet)(p) + level = ts.level + t0 = append(t0, ts.table) + } else { + return nil + } + } + + c := &compaction{s: s, v: v, level: level} + if level == 0 { + imin, imax := t0.getRange(s.icmp) + t0 = v.tables[0].getOverlaps(t0[:0], s.icmp, imin.ukey(), imax.ukey(), true) + } + + c.tables[0] = t0 + c.expand() + return c +} + +// Create compaction from given level and range; need external synchronization. 
+func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction { + v := s.version_NB() + + t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0) + if len(t0) == 0 { + return nil + } + + // Avoid compacting too much in one shot in case the range is large. + // But we cannot do this for level-0 since level-0 files can overlap + // and we must not pick one file and drop another older file if the + // two files overlap. + if level > 0 { + limit := uint64(kMaxTableSize) + total := uint64(0) + for i, t := range t0 { + total += t.size + if total >= limit { + s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1) + t0 = t0[:i+1] + break + } + } + } + + c := &compaction{s: s, v: v, level: level} + c.tables[0] = t0 + c.expand() + return c +} + +// compaction represent a compaction state. +type compaction struct { + s *session + v *version + + level int + tables [2]tFiles + + gp tFiles + gpidx int + seenKey bool + overlappedBytes uint64 + imin, imax iKey + + tPtrs [kNumLevels]int +} + +// Expand compacted tables; need external synchronization. +func (c *compaction) expand() { + level := c.level + vt0, vt1 := c.v.tables[level], c.v.tables[level+1] + + t0, t1 := c.tables[0], c.tables[1] + imin, imax := t0.getRange(c.s.icmp) + t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) + // Get entire range covered by compaction. + amin, amax := append(t0, t1...).getRange(c.s.icmp) + + // See if we can grow the number of inputs in "level" without + // changing the number of "level+1" files we pick up. + if len(t1) > 0 { + exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), level == 0) + if len(exp0) > len(t0) && t1.size()+exp0.size() < kExpCompactionMaxBytes { + xmin, xmax := exp0.getRange(c.s.icmp) + exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) + if len(exp1) == len(t1) { + c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", + level, level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), + len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) + imin, imax = xmin, xmax + t0, t1 = exp0, exp1 + amin, amax = append(t0, t1...).getRange(c.s.icmp) + } + } + } + + // Compute the set of grandparent files that overlap this compaction + // (parent == level+1; grandparent == level+2) + if level+2 < kNumLevels { + c.gp = c.v.tables[level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) + } + + c.tables[0], c.tables[1] = t0, t1 + c.imin, c.imax = imin, imax +} + +// Check whether compaction is trivial. +func (c *compaction) trivial() bool { + return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= kMaxGrandParentOverlapBytes +} + +func (c *compaction) baseLevelForKey(ukey []byte) bool { + for level, tables := range c.v.tables[c.level+2:] { + for c.tPtrs[level] < len(tables) { + t := tables[c.tPtrs[level]] + if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { + // We've advanced far enough. + if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { + // Key falls in this file's range, so definitely not base level. 
+ return false + } + break + } + c.tPtrs[level]++ + } + } + return true +} + +func (c *compaction) shouldStopBefore(ikey iKey) bool { + for ; c.gpidx < len(c.gp); c.gpidx++ { + gp := c.gp[c.gpidx] + if c.s.icmp.Compare(ikey, gp.imax) <= 0 { + break + } + if c.seenKey { + c.overlappedBytes += gp.size + } + } + c.seenKey = true + + if c.overlappedBytes > kMaxGrandParentOverlapBytes { + // Too much overlap for current output; start new output. + c.overlappedBytes = 0 + return true + } + return false +} + +// Creates an iterator. +func (c *compaction) newIterator() iterator.Iterator { + // Creates iterator slice. + icap := len(c.tables) + if c.level == 0 { + // Special case for level-0 + icap = len(c.tables[0]) + 1 + } + its := make([]iterator.Iterator, 0, icap) + + // Options. + ro := &opt.ReadOptions{ + DontFillCache: true, + } + strict := c.s.o.GetStrict(opt.StrictIterator) + + for i, tables := range c.tables { + if len(tables) == 0 { + continue + } + + // Level-0 is not sorted and may overlaps each other. + if c.level+i == 0 { + for _, t := range tables { + its = append(its, c.s.tops.newIterator(t, nil, ro)) + } + } else { + it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict, true) + its = append(its, it) + } + } + + return iterator.NewMergedIterator(its, c.s.icmp, true) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go new file mode 100644 index 000000000..272129589 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go @@ -0,0 +1,308 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "bufio" + "encoding/binary" + "errors" + "io" +) + +var errCorruptManifest = errors.New("leveldb: corrupt manifest") + +type byteReader interface { + io.Reader + io.ByteReader +} + +// These numbers are written to disk and should not be changed. 
+const ( + recComparer = 1 + recJournalNum = 2 + recNextNum = 3 + recSeq = 4 + recCompactionPointer = 5 + recDeletedTable = 6 + recNewTable = 7 + // 8 was used for large value refs + recPrevJournalNum = 9 +) + +type cpRecord struct { + level int + ikey iKey +} + +type ntRecord struct { + level int + num uint64 + size uint64 + imin iKey + imax iKey +} + +func (r ntRecord) makeFile(s *session) *tFile { + return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax) +} + +type dtRecord struct { + level int + num uint64 +} + +type sessionRecord struct { + hasRec int + comparer string + journalNum uint64 + prevJournalNum uint64 + nextNum uint64 + seq uint64 + compactionPointers []cpRecord + addedTables []ntRecord + deletedTables []dtRecord + scratch [binary.MaxVarintLen64]byte + err error +} + +func (p *sessionRecord) has(rec int) bool { + return p.hasRec&(1<= kNumLevels { + p.err = errCorruptManifest + return 0 + } + return int(x) +} + +func (p *sessionRecord) decode(r io.Reader) error { + br, ok := r.(byteReader) + if !ok { + br = bufio.NewReader(r) + } + p.err = nil + for p.err == nil { + rec, err := binary.ReadUvarint(br) + if err != nil { + if err == io.EOF { + err = nil + } + return err + } + switch rec { + case recComparer: + x := p.readBytes(br) + if p.err == nil { + p.setComparer(string(x)) + } + case recJournalNum: + x := p.readUvarint(br) + if p.err == nil { + p.setJournalNum(x) + } + case recPrevJournalNum: + x := p.readUvarint(br) + if p.err == nil { + p.setPrevJournalNum(x) + } + case recNextNum: + x := p.readUvarint(br) + if p.err == nil { + p.setNextNum(x) + } + case recSeq: + x := p.readUvarint(br) + if p.err == nil { + p.setSeq(x) + } + case recCompactionPointer: + level := p.readLevel(br) + ikey := p.readBytes(br) + if p.err == nil { + p.addCompactionPointer(level, iKey(ikey)) + } + case recNewTable: + level := p.readLevel(br) + num := p.readUvarint(br) + size := p.readUvarint(br) + imin := p.readBytes(br) + imax := p.readBytes(br) + if p.err == nil { + p.addTable(level, num, size, imin, imax) + } + case recDeletedTable: + level := p.readLevel(br) + num := p.readUvarint(br) + if p.err == nil { + p.deleteTable(level, num) + } + } + } + + return p.err +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go new file mode 100644 index 000000000..029fabfe6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go @@ -0,0 +1,62 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package leveldb + +import ( + "bytes" + "testing" +) + +func decodeEncode(v *sessionRecord) (res bool, err error) { + b := new(bytes.Buffer) + err = v.encode(b) + if err != nil { + return + } + v2 := new(sessionRecord) + err = v.decode(b) + if err != nil { + return + } + b2 := new(bytes.Buffer) + err = v2.encode(b2) + if err != nil { + return + } + return bytes.Equal(b.Bytes(), b2.Bytes()), nil +} + +func TestSessionRecord_EncodeDecode(t *testing.T) { + big := uint64(1) << 50 + v := new(sessionRecord) + i := uint64(0) + test := func() { + res, err := decodeEncode(v) + if err != nil { + t.Fatalf("error when testing encode/decode sessionRecord: %v", err) + } + if !res { + t.Error("encode/decode test failed at iteration:", i) + } + } + + for ; i < 4; i++ { + test() + v.addTable(3, big+300+i, big+400+i, + newIKey([]byte("foo"), big+500+1, tVal), + newIKey([]byte("zoo"), big+600+1, tDel)) + v.deleteTable(4, big+700+i) + v.addCompactionPointer(int(i), newIKey([]byte("x"), big+900+1, tVal)) + } + + v.setComparer("foo") + v.setJournalNum(big + 100) + v.setPrevJournalNum(big + 99) + v.setNextNum(big + 200) + v.setSeq(big + 1000) + test() +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go new file mode 100644 index 000000000..a34c9eb4a --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go @@ -0,0 +1,248 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "sync/atomic" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" +) + +// Logging. + +type dropper struct { + s *session + file storage.File +} + +func (d dropper) Drop(err error) { + if e, ok := err.(journal.ErrCorrupted); ok { + d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason) + } else { + d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err) + } +} + +func (s *session) log(v ...interface{}) { s.stor.Log(fmt.Sprint(v...)) } +func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) } + +// File utils. + +func (s *session) getJournalFile(num uint64) storage.File { + return s.stor.GetFile(num, storage.TypeJournal) +} + +func (s *session) getTableFile(num uint64) storage.File { + return s.stor.GetFile(num, storage.TypeTable) +} + +func (s *session) getFiles(t storage.FileType) ([]storage.File, error) { + return s.stor.GetFiles(t) +} + +func (s *session) newTemp() storage.File { + num := atomic.AddUint64(&s.stTempFileNum, 1) - 1 + return s.stor.GetFile(num, storage.TypeTemp) +} + +// Session state. + +// Get current version. +func (s *session) version() *version { + s.vmu.Lock() + defer s.vmu.Unlock() + s.stVersion.ref++ + return s.stVersion +} + +// Get current version; no barrier. +func (s *session) version_NB() *version { + return s.stVersion +} + +// Set current version to v. +func (s *session) setVersion(v *version) { + s.vmu.Lock() + v.ref = 1 + if old := s.stVersion; old != nil { + v.ref++ + old.next = v + old.release_NB() + } + s.stVersion = v + s.vmu.Unlock() +} + +// Get current unused file number. 
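+// It is read atomically here and advanced by markFileNum and allocFileNum
+// below; see also setFileNum and reuseFileNum.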
+func (s *session) fileNum() uint64 { + return atomic.LoadUint64(&s.stFileNum) +} + +// Get current unused file number to num. +func (s *session) setFileNum(num uint64) { + atomic.StoreUint64(&s.stFileNum, num) +} + +// Mark file number as used. +func (s *session) markFileNum(num uint64) { + num += 1 + for { + old, x := s.stFileNum, num + if old > x { + x = old + } + if atomic.CompareAndSwapUint64(&s.stFileNum, old, x) { + break + } + } +} + +// Allocate a file number. +func (s *session) allocFileNum() (num uint64) { + return atomic.AddUint64(&s.stFileNum, 1) - 1 +} + +// Reuse given file number. +func (s *session) reuseFileNum(num uint64) { + for { + old, x := s.stFileNum, num + if old != x+1 { + x = old + } + if atomic.CompareAndSwapUint64(&s.stFileNum, old, x) { + break + } + } +} + +// Manifest related utils. + +// Fill given session record obj with current states; need external +// synchronization. +func (s *session) fillRecord(r *sessionRecord, snapshot bool) { + r.setNextNum(s.fileNum()) + + if snapshot { + if !r.has(recJournalNum) { + r.setJournalNum(s.stJournalNum) + } + + if !r.has(recSeq) { + r.setSeq(s.stSeq) + } + + for level, ik := range s.stCptrs { + if ik != nil { + r.addCompactionPointer(level, ik) + } + } + + r.setComparer(s.icmp.uName()) + } +} + +// Mark if record has been commited, this will update session state; +// need external synchronization. +func (s *session) recordCommited(r *sessionRecord) { + if r.has(recJournalNum) { + s.stJournalNum = r.journalNum + } + + if r.has(recPrevJournalNum) { + s.stPrevJournalNum = r.prevJournalNum + } + + if r.has(recSeq) { + s.stSeq = r.seq + } + + for _, p := range r.compactionPointers { + s.stCptrs[p.level] = iKey(p.ikey) + } +} + +// Create a new manifest file; need external synchronization. +func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { + num := s.allocFileNum() + file := s.stor.GetFile(num, storage.TypeManifest) + writer, err := file.Create() + if err != nil { + return + } + jw := journal.NewWriter(writer) + + if v == nil { + v = s.version_NB() + } + if rec == nil { + rec = new(sessionRecord) + } + s.fillRecord(rec, true) + v.fillRecord(rec) + + defer func() { + if err == nil { + s.recordCommited(rec) + if s.manifest != nil { + s.manifest.Close() + } + if s.manifestWriter != nil { + s.manifestWriter.Close() + } + if s.manifestFile != nil { + s.manifestFile.Remove() + } + s.manifestFile = file + s.manifestWriter = writer + s.manifest = jw + } else { + writer.Close() + file.Remove() + s.reuseFileNum(num) + } + }() + + w, err := jw.Next() + if err != nil { + return + } + err = rec.encode(w) + if err != nil { + return + } + err = jw.Flush() + if err != nil { + return + } + err = s.stor.SetManifest(file) + return +} + +// Flush record to disk. 
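+// This fills the record with the current file number, appends it to the live
+// manifest journal, flushes and syncs the writer, and only then commits the
+// record to the in-memory session state.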
+func (s *session) flushManifest(rec *sessionRecord) (err error) { + s.fillRecord(rec, false) + w, err := s.manifest.Next() + if err != nil { + return + } + err = rec.encode(w) + if err != nil { + return + } + err = s.manifest.Flush() + if err != nil { + return + } + err = s.manifestWriter.Sync() + if err != nil { + return + } + s.recordCommited(rec) + return +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go new file mode 100644 index 000000000..9b7fa2f12 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go @@ -0,0 +1,534 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reservefs. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package storage + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var errFileOpen = errors.New("leveldb/storage: file still open") + +type fileLock interface { + release() error +} + +type fileStorageLock struct { + fs *fileStorage +} + +func (lock *fileStorageLock) Release() { + fs := lock.fs + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.slock == lock { + fs.slock = nil + } + return +} + +// fileStorage is a file-system backed storage. +type fileStorage struct { + path string + + mu sync.Mutex + flock fileLock + slock *fileStorageLock + logw *os.File + buf []byte + // Opened file counter; if open < 0 means closed. + open int + day int +} + +// OpenFile returns a new filesytem-backed storage implementation with the given +// path. This also hold a file lock, so any subsequent attempt to open the same +// path will fail. +// +// The storage must be closed after use, by calling Close method. +func OpenFile(path string) (Storage, error) { + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + + flock, err := newFileLock(filepath.Join(path, "LOCK")) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + flock.release() + } + }() + + rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old")) + logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return nil, err + } + + fs := &fileStorage{path: path, flock: flock, logw: logw} + runtime.SetFinalizer(fs, (*fileStorage).Close) + return fs, nil +} + +func (fs *fileStorage) Lock() (util.Releaser, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + if fs.slock != nil { + return nil, ErrLocked + } + fs.slock = &fileStorageLock{fs: fs} + return fs.slock, nil +} + +func itoa(buf []byte, i int, wid int) []byte { + var u uint = uint(i) + if u == 0 && wid <= 1 { + return append(buf, '0') + } + + // Assemble decimal in reverse order. + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + return append(buf, b[bp:]...) 
+} + +func (fs *fileStorage) printDay(t time.Time) { + if fs.day == t.Day() { + return + } + fs.day = t.Day() + fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) +} + +func (fs *fileStorage) doLog(t time.Time, str string) { + fs.printDay(t) + hour, min, sec := t.Clock() + msec := t.Nanosecond() / 1e3 + // time + fs.buf = itoa(fs.buf[:0], hour, 2) + fs.buf = append(fs.buf, ':') + fs.buf = itoa(fs.buf, min, 2) + fs.buf = append(fs.buf, ':') + fs.buf = itoa(fs.buf, sec, 2) + fs.buf = append(fs.buf, '.') + fs.buf = itoa(fs.buf, msec, 6) + fs.buf = append(fs.buf, ' ') + // write + fs.buf = append(fs.buf, []byte(str)...) + fs.buf = append(fs.buf, '\n') + fs.logw.Write(fs.buf) +} + +func (fs *fileStorage) Log(str string) { + t := time.Now() + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return + } + fs.doLog(t, str) +} + +func (fs *fileStorage) log(str string) { + fs.doLog(time.Now(), str) +} + +func (fs *fileStorage) GetFile(num uint64, t FileType) File { + return &file{fs: fs, num: num, t: t} +} + +func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + dir, err := os.Open(fs.path) + if err != nil { + return + } + fnn, err := dir.Readdirnames(0) + // Close the dir first before checking for Readdirnames error. + if err := dir.Close(); err != nil { + fs.log(fmt.Sprintf("close dir: %v", err)) + } + if err != nil { + return + } + f := &file{fs: fs} + for _, fn := range fnn { + if f.parse(fn) && (f.t&t) != 0 { + ff = append(ff, f) + f = &file{fs: fs} + } + } + return +} + +func (fs *fileStorage) GetManifest() (f File, err error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + dir, err := os.Open(fs.path) + if err != nil { + return + } + fnn, err := dir.Readdirnames(0) + // Close the dir first before checking for Readdirnames error. + if err := dir.Close(); err != nil { + fs.log(fmt.Sprintf("close dir: %v", err)) + } + if err != nil { + return + } + // Find latest CURRENT file. + var rem []string + var pend bool + var cerr error + for _, fn := range fnn { + if strings.HasPrefix(fn, "CURRENT") { + pend1 := len(fn) > 7 + // Make sure it is valid name for a CURRENT file, otherwise skip it. + if pend1 { + if fn[7] != '.' || len(fn) < 9 { + fs.log(fmt.Sprintf("skipping %s: invalid file name", fn)) + continue + } + if _, e1 := strconv.ParseUint(fn[7:], 10, 0); e1 != nil { + fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1)) + continue + } + } + path := filepath.Join(fs.path, fn) + r, e1 := os.OpenFile(path, os.O_RDONLY, 0) + if e1 != nil { + return nil, e1 + } + b, e1 := ioutil.ReadAll(r) + if e1 != nil { + r.Close() + return nil, e1 + } + f1 := &file{fs: fs} + if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) { + fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn)) + if pend1 { + rem = append(rem, fn) + } + if !pend1 || cerr == nil { + cerr = fmt.Errorf("leveldb/storage: corrupted or incomplete %s file", fn) + } + } else if f != nil && f1.Num() < f.Num() { + fs.log(fmt.Sprintf("skipping %s: obsolete", fn)) + if pend1 { + rem = append(rem, fn) + } + } else { + f = f1 + pend = pend1 + } + if err := r.Close(); err != nil { + fs.log(fmt.Sprintf("close %s: %v", fn, err)) + } + } + } + // Don't remove any files if there is no valid CURRENT file. 
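+ // A corrupted or incomplete CURRENT file is surfaced as an error only when
+ // no usable CURRENT file is found at all.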
+ if f == nil { + if cerr != nil { + err = cerr + } else { + err = os.ErrNotExist + } + return + } + // Rename pending CURRENT file to an effective CURRENT. + if pend { + path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num()) + if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { + fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err)) + } + } + // Remove obsolete or incomplete pending CURRENT files. + for _, fn := range rem { + path := filepath.Join(fs.path, fn) + if err := os.Remove(path); err != nil { + fs.log(fmt.Sprintf("remove %s: %v", fn, err)) + } + } + return +} + +func (fs *fileStorage) SetManifest(f File) (err error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + f2, ok := f.(*file) + if !ok || f2.t != TypeManifest { + return ErrInvalidFile + } + defer func() { + if err != nil { + fs.log(fmt.Sprintf("CURRENT: %v", err)) + } + }() + path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num()) + w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + _, err = fmt.Fprintln(w, f2.name()) + // Close the file first. + if err := w.Close(); err != nil { + fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err)) + } + if err != nil { + return err + } + return rename(path, filepath.Join(fs.path, "CURRENT")) +} + +func (fs *fileStorage) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + // Clear the finalizer. + runtime.SetFinalizer(fs, nil) + + if fs.open > 0 { + fs.log(fmt.Sprintf("refuse to close, %d files still open", fs.open)) + return fmt.Errorf("leveldb/storage: cannot close, %d files still open", fs.open) + } + fs.open = -1 + e1 := fs.logw.Close() + err := fs.flock.release() + if err == nil { + err = e1 + } + return err +} + +type fileWrap struct { + *os.File + f *file +} + +func (fw fileWrap) Sync() error { + if err := fw.File.Sync(); err != nil { + return err + } + if fw.f.Type() == TypeManifest { + // Also sync parent directory if file type is manifest. + // See: https://code.google.com/p/leveldb/issues/detail?id=190. 
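+ // Without the directory sync, the directory entry of a freshly created
+ // manifest may not survive a crash even though the file contents do.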
+ if err := syncDir(fw.f.fs.path); err != nil { + return err + } + } + return nil +} + +func (fw fileWrap) Close() error { + f := fw.f + f.fs.mu.Lock() + defer f.fs.mu.Unlock() + if !f.open { + return ErrClosed + } + f.open = false + f.fs.open-- + err := fw.File.Close() + if err != nil { + f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err)) + } + return err +} + +type file struct { + fs *fileStorage + num uint64 + t FileType + open bool +} + +func (f *file) Open() (Reader, error) { + f.fs.mu.Lock() + defer f.fs.mu.Unlock() + if f.fs.open < 0 { + return nil, ErrClosed + } + if f.open { + return nil, errFileOpen + } + of, err := os.OpenFile(f.path(), os.O_RDONLY, 0) + if err != nil { + if f.hasOldName() && os.IsNotExist(err) { + of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0) + if err == nil { + goto ok + } + } + return nil, err + } +ok: + f.open = true + f.fs.open++ + return fileWrap{of, f}, nil +} + +func (f *file) Create() (Writer, error) { + f.fs.mu.Lock() + defer f.fs.mu.Unlock() + if f.fs.open < 0 { + return nil, ErrClosed + } + if f.open { + return nil, errFileOpen + } + of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return nil, err + } + f.open = true + f.fs.open++ + return fileWrap{of, f}, nil +} + +func (f *file) Replace(newfile File) error { + f.fs.mu.Lock() + defer f.fs.mu.Unlock() + if f.fs.open < 0 { + return ErrClosed + } + newfile2, ok := newfile.(*file) + if !ok { + return ErrInvalidFile + } + if f.open || newfile2.open { + return errFileOpen + } + return rename(newfile2.path(), f.path()) +} + +func (f *file) Type() FileType { + return f.t +} + +func (f *file) Num() uint64 { + return f.num +} + +func (f *file) Remove() error { + f.fs.mu.Lock() + defer f.fs.mu.Unlock() + if f.fs.open < 0 { + return ErrClosed + } + if f.open { + return errFileOpen + } + err := os.Remove(f.path()) + if err != nil { + f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err)) + } + // Also try remove file with old name, just in case. 
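+ // (Tables written with the old naming scheme use the ".sst" suffix; see
+ // oldName below.)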
+ if f.hasOldName() { + if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) { + f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), err)) + err = e1 + } + } + return err +} + +func (f *file) hasOldName() bool { + return f.t == TypeTable +} + +func (f *file) oldName() string { + switch f.t { + case TypeTable: + return fmt.Sprintf("%06d.sst", f.num) + } + return f.name() +} + +func (f *file) oldPath() string { + return filepath.Join(f.fs.path, f.oldName()) +} + +func (f *file) name() string { + switch f.t { + case TypeManifest: + return fmt.Sprintf("MANIFEST-%06d", f.num) + case TypeJournal: + return fmt.Sprintf("%06d.log", f.num) + case TypeTable: + return fmt.Sprintf("%06d.ldb", f.num) + case TypeTemp: + return fmt.Sprintf("%06d.tmp", f.num) + default: + panic("invalid file type") + } +} + +func (f *file) path() string { + return filepath.Join(f.fs.path, f.name()) +} + +func (f *file) parse(name string) bool { + var num uint64 + var tail string + _, err := fmt.Sscanf(name, "%d.%s", &num, &tail) + if err == nil { + switch tail { + case "log": + f.t = TypeJournal + case "ldb", "sst": + f.t = TypeTable + case "tmp": + f.t = TypeTemp + default: + return false + } + f.num = num + return true + } + n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &num, &tail) + if n == 1 { + f.t = TypeManifest + f.num = num + return true + } + + return false +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go new file mode 100644 index 000000000..42940d769 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go @@ -0,0 +1,52 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package storage + +import ( + "os" + "path/filepath" +) + +type plan9FileLock struct { + f *os.File +} + +func (fl *plan9FileLock) release() error { + return fl.f.Close() +} + +func newFileLock(path string) (fl fileLock, err error) { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644) + if err != nil { + return + } + fl = &plan9FileLock{f: f} + return +} + +func rename(oldpath, newpath string) error { + if _, err := os.Stat(newpath); err == nil { + if err := os.Remove(newpath); err != nil { + return err + } + } + + _, fname := filepath.Split(newpath) + return os.Rename(oldpath, fname) +} + +func syncDir(name string) error { + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go new file mode 100644 index 000000000..102031bfd --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go @@ -0,0 +1,68 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +// +build solaris + +package storage + +import ( + "os" + "syscall" +) + +type unixFileLock struct { + f *os.File +} + +func (fl *unixFileLock) release() error { + if err := setFileLock(fl.f, false); err != nil { + return err + } + return fl.f.Close() +} + +func newFileLock(path string) (fl fileLock, err error) { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + return + } + err = setFileLock(f, true) + if err != nil { + f.Close() + return + } + fl = &unixFileLock{f: f} + return +} + +func setFileLock(f *os.File, lock bool) error { + flock := syscall.Flock_t{ + Type: syscall.F_UNLCK, + Start: 0, + Len: 0, + Whence: 1, + } + if lock { + flock.Type = syscall.F_WRLCK + } + return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) +} + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func syncDir(name string) error { + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go new file mode 100644 index 000000000..92abcbb7d --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go @@ -0,0 +1,142 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package storage + +import ( + "fmt" + "os" + "path/filepath" + "testing" +) + +var cases = []struct { + oldName []string + name string + ftype FileType + num uint64 +}{ + {nil, "000100.log", TypeJournal, 100}, + {nil, "000000.log", TypeJournal, 0}, + {[]string{"000000.sst"}, "000000.ldb", TypeTable, 0}, + {nil, "MANIFEST-000002", TypeManifest, 2}, + {nil, "MANIFEST-000007", TypeManifest, 7}, + {nil, "18446744073709551615.log", TypeJournal, 18446744073709551615}, + {nil, "000100.tmp", TypeTemp, 100}, +} + +var invalidCases = []string{ + "", + "foo", + "foo-dx-100.log", + ".log", + "", + "manifest", + "CURREN", + "CURRENTX", + "MANIFES", + "MANIFEST", + "MANIFEST-", + "XMANIFEST-3", + "MANIFEST-3x", + "LOC", + "LOCKx", + "LO", + "LOGx", + "18446744073709551616.log", + "184467440737095516150.log", + "100", + "100.", + "100.lop", +} + +func TestFileStorage_CreateFileName(t *testing.T) { + for _, c := range cases { + f := &file{num: c.num, t: c.ftype} + if f.name() != c.name { + t.Errorf("invalid filename got '%s', want '%s'", f.name(), c.name) + } + } +} + +func TestFileStorage_ParseFileName(t *testing.T) { + for _, c := range cases { + for _, name := range append([]string{c.name}, c.oldName...) 
{ + f := new(file) + if !f.parse(name) { + t.Errorf("cannot parse filename '%s'", name) + continue + } + if f.Type() != c.ftype { + t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, f.Type(), c.ftype) + } + if f.Num() != c.num { + t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, f.Num(), c.num) + } + } + } +} + +func TestFileStorage_InvalidFileName(t *testing.T) { + for _, name := range invalidCases { + f := new(file) + if f.parse(name) { + t.Errorf("filename '%s' should be invalid", name) + } + } +} + +func TestFileStorage_Locking(t *testing.T) { + path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestfd-%d", os.Getuid())) + + _, err := os.Stat(path) + if err == nil { + err = os.RemoveAll(path) + if err != nil { + t.Fatal("RemoveAll: got error: ", err) + } + } + + p1, err := OpenFile(path) + if err != nil { + t.Fatal("OpenFile(1): got error: ", err) + } + + defer os.RemoveAll(path) + + p2, err := OpenFile(path) + if err != nil { + t.Logf("OpenFile(2): got error: %s (expected)", err) + } else { + p2.Close() + p1.Close() + t.Fatal("OpenFile(2): expect error") + } + + p1.Close() + + p3, err := OpenFile(path) + if err != nil { + t.Fatal("OpenFile(3): got error: ", err) + } + defer p3.Close() + + l, err := p3.Lock() + if err != nil { + t.Fatal("storage lock failed(1): ", err) + } + _, err = p3.Lock() + if err == nil { + t.Fatal("expect error for second storage lock attempt") + } else { + t.Logf("storage lock got error: %s (expected)", err) + } + l.Release() + _, err = p3.Lock() + if err != nil { + t.Fatal("storage lock failed(2): ", err) + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go new file mode 100644 index 000000000..d0a604b7a --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go @@ -0,0 +1,63 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package storage + +import ( + "os" + "syscall" +) + +type unixFileLock struct { + f *os.File +} + +func (fl *unixFileLock) release() error { + if err := setFileLock(fl.f, false); err != nil { + return err + } + return fl.f.Close() +} + +func newFileLock(path string) (fl fileLock, err error) { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + return + } + err = setFileLock(f, true) + if err != nil { + f.Close() + return + } + fl = &unixFileLock{f: f} + return +} + +func setFileLock(f *os.File, lock bool) error { + how := syscall.LOCK_UN + if lock { + how = syscall.LOCK_EX + } + return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) +} + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func syncDir(name string) error { + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go new file mode 100644 index 000000000..50c3c454e --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go @@ -0,0 +1,69 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package storage + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procMoveFileExW = modkernel32.NewProc("MoveFileExW") +) + +const ( + _MOVEFILE_REPLACE_EXISTING = 1 +) + +type windowsFileLock struct { + fd syscall.Handle +} + +func (fl *windowsFileLock) release() error { + return syscall.Close(fl.fd) +} + +func newFileLock(path string) (fl fileLock, err error) { + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return + } + fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) + if err != nil { + return + } + fl = &windowsFileLock{fd: fd} + return +} + +func moveFileEx(from *uint16, to *uint16, flags uint32) error { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + return error(e1) + } else { + return syscall.EINVAL + } + } + return nil +} + +func rename(oldpath, newpath string) error { + from, err := syscall.UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := syscall.UTF16PtrFromString(newpath) + if err != nil { + return err + } + return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING) +} + +func syncDir(name string) error { return nil } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go new file mode 100644 index 000000000..5691c7e02 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go @@ -0,0 +1,203 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package storage + +import ( + "bytes" + "os" + "sync" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +const typeShift = 3 + +type memStorageLock struct { + ms *memStorage +} + +func (lock *memStorageLock) Release() { + ms := lock.ms + ms.mu.Lock() + defer ms.mu.Unlock() + if ms.slock == lock { + ms.slock = nil + } + return +} + +// memStorage is a memory-backed storage. +type memStorage struct { + mu sync.Mutex + slock *memStorageLock + files map[uint64]*memFile + manifest *memFilePtr +} + +// NewMemStorage returns a new memory-backed storage implementation. +func NewMemStorage() Storage { + return &memStorage{ + files: make(map[uint64]*memFile), + } +} + +func (ms *memStorage) Lock() (util.Releaser, error) { + ms.mu.Lock() + defer ms.mu.Unlock() + if ms.slock != nil { + return nil, ErrLocked + } + ms.slock = &memStorageLock{ms: ms} + return ms.slock, nil +} + +func (*memStorage) Log(str string) {} + +func (ms *memStorage) GetFile(num uint64, t FileType) File { + return &memFilePtr{ms: ms, num: num, t: t} +} + +func (ms *memStorage) GetFiles(t FileType) ([]File, error) { + ms.mu.Lock() + var ff []File + for x, _ := range ms.files { + num, mt := x>>typeShift, FileType(x)&TypeAll + if mt&t == 0 { + continue + } + ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt}) + } + ms.mu.Unlock() + return ff, nil +} + +func (ms *memStorage) GetManifest() (File, error) { + ms.mu.Lock() + defer ms.mu.Unlock() + if ms.manifest == nil { + return nil, os.ErrNotExist + } + return ms.manifest, nil +} + +func (ms *memStorage) SetManifest(f File) error { + fm, ok := f.(*memFilePtr) + if !ok || fm.t != TypeManifest { + return ErrInvalidFile + } + ms.mu.Lock() + ms.manifest = fm + ms.mu.Unlock() + return nil +} + +func (*memStorage) Close() error { return nil } + +type memReader struct { + *bytes.Reader + m *memFile +} + +func (mr *memReader) Close() error { + return mr.m.Close() +} + +type memFile struct { + bytes.Buffer + ms *memStorage + open bool +} + +func (*memFile) Sync() error { return nil } +func (m *memFile) Close() error { + m.ms.mu.Lock() + m.open = false + m.ms.mu.Unlock() + return nil +} + +type memFilePtr struct { + ms *memStorage + num uint64 + t FileType +} + +func (p *memFilePtr) x() uint64 { + return p.Num()< +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package storage + +import ( + "bytes" + "testing" +) + +func TestMemStorage(t *testing.T) { + m := NewMemStorage() + + l, err := m.Lock() + if err != nil { + t.Fatal("storage lock failed(1): ", err) + } + _, err = m.Lock() + if err == nil { + t.Fatal("expect error for second storage lock attempt") + } else { + t.Logf("storage lock got error: %s (expected)", err) + } + l.Release() + _, err = m.Lock() + if err != nil { + t.Fatal("storage lock failed(2): ", err) + } + + f := m.GetFile(1, TypeTable) + if f.Num() != 1 && f.Type() != TypeTable { + t.Fatal("invalid file number and type") + } + w, _ := f.Create() + w.Write([]byte("abc")) + w.Close() + if ff, _ := m.GetFiles(TypeAll); len(ff) != 1 { + t.Fatal("invalid GetFiles len") + } + buf := new(bytes.Buffer) + r, err := f.Open() + if err != nil { + t.Fatal("Open: got error: ", err) + } + buf.ReadFrom(r) + r.Close() + if got := buf.String(); got != "abc" { + t.Fatalf("Read: invalid value, want=abc got=%s", got) + } + if _, err := f.Open(); err != nil { + t.Fatal("Open: got error: ", err) + } + if _, err := m.GetFile(1, TypeTable).Open(); err == nil { + t.Fatal("expecting error") + } + f.Remove() + if ff, _ := m.GetFiles(TypeAll); len(ff) != 0 { + t.Fatal("invalid GetFiles len", len(ff)) + } + if _, err := f.Open(); err == nil { + t.Fatal("expecting error") + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go new file mode 100644 index 000000000..bd62220c8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go @@ -0,0 +1,127 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package storage provides storage abstraction for LevelDB. +package storage + +import ( + "errors" + "fmt" + "io" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +type FileType uint32 + +const ( + TypeManifest FileType = 1 << iota + TypeJournal + TypeTable + TypeTemp + + TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp +) + +func (t FileType) String() string { + switch t { + case TypeManifest: + return "manifest" + case TypeJournal: + return "journal" + case TypeTable: + return "table" + case TypeTemp: + return "temp" + } + return fmt.Sprintf("", t) +} + +var ( + ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument") + ErrLocked = errors.New("leveldb/storage: already locked") + ErrClosed = errors.New("leveldb/storage: closed") +) + +// Syncer is the interface that wraps basic Sync method. +type Syncer interface { + // Sync commits the current contents of the file to stable storage. + Sync() error +} + +// Reader is the interface that groups the basic Read, Seek, ReadAt and Close +// methods. +type Reader interface { + io.ReadSeeker + io.ReaderAt + io.Closer +} + +// Writer is the interface that groups the basic Write, Sync and Close +// methods. +type Writer interface { + io.WriteCloser + Syncer +} + +// File is the file. A file instance must be goroutine-safe. +type File interface { + // Open opens the file for read. Returns os.ErrNotExist error + // if the file does not exist. + // Returns ErrClosed if the underlying storage is closed. + Open() (r Reader, err error) + + // Create creates the file for writting. Truncate the file if + // already exist. 
+ // Returns ErrClosed if the underlying storage is closed. + Create() (w Writer, err error) + + // Replace replaces file with newfile. + // Returns ErrClosed if the underlying storage is closed. + Replace(newfile File) error + + // Type returns the file type + Type() FileType + + // Num returns the file number. + Num() uint64 + + // Remove removes the file. + // Returns ErrClosed if the underlying storage is closed. + Remove() error +} + +// Storage is the storage. A storage instance must be goroutine-safe. +type Storage interface { + // Lock locks the storage. Any subsequent attempt to call Lock will fail + // until the last lock released. + // After use the caller should call the Release method. + Lock() (l util.Releaser, err error) + + // Log logs a string. This is used for logging. An implementation + // may write to a file, stdout or simply do nothing. + Log(str string) + + // GetFile returns a file for the given number and type. GetFile will never + // returns nil, even if the underlying storage is closed. + GetFile(num uint64, t FileType) File + + // GetFiles returns a slice of files that match the given file types. + // The file types may be OR'ed together. + GetFiles(t FileType) ([]File, error) + + // GetManifest returns a manifest file. Returns os.ErrNotExist if manifest + // file does not exist. + GetManifest() (File, error) + + // SetManifest sets the given file as manifest file. The given file should + // be a manifest file type or error will be returned. + SetManifest(f File) error + + // Close closes the storage. It is valid to call Close multiple times. + // Other methods should not be called after the storage has been closed. + Close() error +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go new file mode 100644 index 000000000..77c0cb6d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go @@ -0,0 +1,459 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENE file. 
+ +package leveldb + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +const typeShift = 4 + +var ( + tsErrInvalidFile = errors.New("leveldb.testStorage: invalid file for argument") + tsErrFileOpen = errors.New("leveldb.testStorage: file still open") +) + +var ( + tsFSEnv = os.Getenv("GOLEVELDB_USEFS") + tsKeepFS = tsFSEnv == "2" + tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1" + tsMU = &sync.Mutex{} + tsNum = 0 +) + +type tsLock struct { + ts *testStorage + r util.Releaser +} + +func (l tsLock) Release() { + l.r.Release() + l.ts.t.Log("I: storage lock released") +} + +type tsReader struct { + tf tsFile + storage.Reader +} + +func (tr tsReader) Read(b []byte) (n int, err error) { + ts := tr.tf.ts + ts.countRead(tr.tf.Type()) + n, err = tr.Reader.Read(b) + if err != nil && err != io.EOF { + ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err) + } + return +} + +func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) { + ts := tr.tf.ts + ts.countRead(tr.tf.Type()) + n, err = tr.Reader.ReadAt(b, off) + if err != nil && err != io.EOF { + ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err) + } + return +} + +func (tr tsReader) Close() (err error) { + err = tr.Reader.Close() + tr.tf.close("reader", err) + return +} + +type tsWriter struct { + tf tsFile + storage.Writer +} + +func (tw tsWriter) Write(b []byte) (n int, err error) { + ts := tw.tf.ts + ts.mu.Lock() + defer ts.mu.Unlock() + if ts.emuWriteErr&tw.tf.Type() != 0 { + return 0, errors.New("leveldb.testStorage: emulated write error") + } + n, err = tw.Writer.Write(b) + if err != nil { + ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err) + } + return +} + +func (tw tsWriter) Sync() (err error) { + ts := tw.tf.ts + ts.mu.Lock() + defer ts.mu.Unlock() + for ts.emuDelaySync&tw.tf.Type() != 0 { + ts.cond.Wait() + } + if ts.emuSyncErr&tw.tf.Type() != 0 { + return errors.New("leveldb.testStorage: emulated sync error") + } + err = tw.Writer.Sync() + if err != nil { + ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err) + } + return +} + +func (tw tsWriter) Close() (err error) { + err = tw.Writer.Close() + tw.tf.close("reader", err) + return +} + +type tsFile struct { + ts *testStorage + storage.File +} + +func (tf tsFile) x() uint64 { + return tf.Num()<>typeShift, storage.FileType(x)&storage.TypeAll + ts.t.Errorf("E: * num=%d type=%v writer=%v", num, tt, writer) + } + } + ts.mu.Unlock() +} + +func newTestStorage(t *testing.T) *testStorage { + var stor storage.Storage + var closeFn func() error + if tsFS { + for { + tsMU.Lock() + num := tsNum + tsNum++ + tsMU.Unlock() + path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) + if _, err := os.Stat(path); err != nil { + stor, err = storage.OpenFile(path) + if err != nil { + t.Fatalf("F: cannot create storage: %v", err) + } + t.Logf("I: storage created: %s", path) + closeFn = func() error { + for _, name := range []string{"LOG.old", "LOG"} { + f, err := os.Open(filepath.Join(path, name)) + if err != nil { + continue + } + if log, err := ioutil.ReadAll(f); err != nil { + t.Logf("---------------------- %s ----------------------", name) + 
t.Logf("cannot read log: %v", err) + t.Logf("---------------------- %s ----------------------", name) + } else if len(log) > 0 { + t.Logf("---------------------- %s ----------------------\n%s", name, string(log)) + t.Logf("---------------------- %s ----------------------", name) + } + f.Close() + } + if tsKeepFS { + return nil + } + return os.RemoveAll(path) + } + + break + } + } + } else { + stor = storage.NewMemStorage() + } + ts := &testStorage{ + t: t, + Storage: stor, + closeFn: closeFn, + opens: make(map[uint64]bool), + } + ts.cond.L = &ts.mu + return ts +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go new file mode 100644 index 000000000..9224a0025 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go @@ -0,0 +1,469 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "sort" + "sync/atomic" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +// tFile holds basic information about a table. +type tFile struct { + file storage.File + seekLeft int32 + size uint64 + imin, imax iKey +} + +// Returns true if given key is after largest key of this table. +func (t *tFile) after(icmp *iComparer, ukey []byte) bool { + return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 +} + +// Returns true if given key is before smallest key of this table. +func (t *tFile) before(icmp *iComparer, ukey []byte) bool { + return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 +} + +// Returns true if given key range overlaps with this table key range. +func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { + return !t.after(icmp, umin) && !t.before(icmp, umax) +} + +// Cosumes one seek and return current seeks left. +func (t *tFile) consumeSeek() int32 { + return atomic.AddInt32(&t.seekLeft, -1) +} + +// Creates new tFile. +func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile { + f := &tFile{ + file: file, + size: size, + imin: imin, + imax: imax, + } + + // We arrange to automatically compact this file after + // a certain number of seeks. Let's assume: + // (1) One seek costs 10ms + // (2) Writing or reading 1MB costs 10ms (100MB/s) + // (3) A compaction of 1MB does 25MB of IO: + // 1MB read from this level + // 10-12MB read from next level (boundaries may be misaligned) + // 10-12MB written to next level + // This implies that 25 seeks cost the same as the compaction + // of 1MB of data. I.e., one seek costs approximately the + // same as the compaction of 40KB of data. We are a little + // conservative and allow approximately one seek for every 16KB + // of data before triggering a compaction. + f.seekLeft = int32(size / 16384) + if f.seekLeft < 100 { + f.seekLeft = 100 + } + + return f +} + +// tFiles hold multiple tFile. 
+type tFiles []*tFile + +func (tf tFiles) Len() int { return len(tf) } +func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } + +// Returns true if i smallest key is less than j. +// This used for sort by key in ascending order. +func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { + a, b := tf[i], tf[j] + n := icmp.Compare(a.imin, b.imin) + if n == 0 { + return a.file.Num() < b.file.Num() + } + return n < 0 +} + +// Returns true if i file number is greater than j. +// This used for sort by file number in descending order. +func (tf tFiles) lessByNum(i, j int) bool { + return tf[i].file.Num() > tf[j].file.Num() +} + +// Sorts tables by key in ascending order. +func (tf tFiles) sortByKey(icmp *iComparer) { + sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) +} + +// Sorts tables by file number in descending order. +func (tf tFiles) sortByNum() { + sort.Sort(&tFilesSortByNum{tFiles: tf}) +} + +// Returns sum of all tables size. +func (tf tFiles) size() (sum uint64) { + for _, t := range tf { + sum += t.size + } + return sum +} + +// Searches smallest index of tables whose its smallest +// key is after or equal with given key. +func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int { + return sort.Search(len(tf), func(i int) bool { + return icmp.Compare(tf[i].imin, ikey) >= 0 + }) +} + +// Searches smallest index of tables whose its largest +// key is after or equal with given key. +func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int { + return sort.Search(len(tf), func(i int) bool { + return icmp.Compare(tf[i].imax, ikey) >= 0 + }) +} + +// Returns true if given key range overlaps with one or more +// tables key range. If unsorted is true then binary search will not be used. +func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { + if unsorted { + // Check against all files. + for _, t := range tf { + if t.overlaps(icmp, umin, umax) { + return true + } + } + return false + } + + i := 0 + if len(umin) > 0 { + // Find the earliest possible internal key for min. + i = tf.searchMax(icmp, newIKey(umin, kMaxSeq, tSeek)) + } + if i >= len(tf) { + // Beginning of range is after all files, so no overlap. + return false + } + return !tf[i].before(icmp, umax) +} + +// Returns tables whose its key range overlaps with given key range. +// If overlapped is true then the search will be expanded to tables that +// overlaps with each other. +func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { + x := len(dst) + for i := 0; i < len(tf); { + t := tf[i] + if t.overlaps(icmp, umin, umax) { + if overlapped { + // For overlapped files, check if the newly added file has + // expanded the range. If so, restart search. + if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { + umin = t.imin.ukey() + dst = dst[:x] + i = 0 + continue + } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { + umax = t.imax.ukey() + dst = dst[:x] + i = 0 + continue + } + } + + dst = append(dst, t) + } + i++ + } + + return dst +} + +// Returns tables key range. +func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) { + for i, t := range tf { + if i == 0 { + imin, imax = t.imin, t.imax + continue + } + if icmp.Compare(t.imin, imin) < 0 { + imin = t.imin + } + if icmp.Compare(t.imax, imax) > 0 { + imax = t.imax + } + } + + return +} + +// Creates iterator index from tables. 
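+// If slice is non-nil, the index is restricted to tables that may overlap the
+// given key range; the returned indexer opens one table iterator per entry on
+// demand via Get.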
+func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { + if slice != nil { + var start, limit int + if slice.Start != nil { + start = tf.searchMax(icmp, iKey(slice.Start)) + } + if slice.Limit != nil { + limit = tf.searchMin(icmp, iKey(slice.Limit)) + } else { + limit = tf.Len() + } + tf = tf[start:limit] + } + return iterator.NewArrayIndexer(&tFilesArrayIndexer{ + tFiles: tf, + tops: tops, + icmp: icmp, + slice: slice, + ro: ro, + }) +} + +// Tables iterator index. +type tFilesArrayIndexer struct { + tFiles + tops *tOps + icmp *iComparer + slice *util.Range + ro *opt.ReadOptions +} + +func (a *tFilesArrayIndexer) Search(key []byte) int { + return a.searchMax(a.icmp, iKey(key)) +} + +func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { + if i == 0 || i == a.Len()-1 { + return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) + } + return a.tops.newIterator(a.tFiles[i], nil, a.ro) +} + +// Helper type for sortByKey. +type tFilesSortByKey struct { + tFiles + icmp *iComparer +} + +func (x *tFilesSortByKey) Less(i, j int) bool { + return x.lessByKey(x.icmp, i, j) +} + +// Helper type for sortByNum. +type tFilesSortByNum struct { + tFiles +} + +func (x *tFilesSortByNum) Less(i, j int) bool { + return x.lessByNum(i, j) +} + +// Table operations. +type tOps struct { + s *session + cache cache.Cache + cacheNS cache.Namespace + bpool *util.BufferPool +} + +// Creates an empty table and returns table writer. +func (t *tOps) create() (*tWriter, error) { + file := t.s.getTableFile(t.s.allocFileNum()) + fw, err := file.Create() + if err != nil { + return nil, err + } + return &tWriter{ + t: t, + file: file, + w: fw, + tw: table.NewWriter(fw, t.s.o), + }, nil +} + +// Builds table from src iterator. +func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { + w, err := t.create() + if err != nil { + return + } + + defer func() { + if err != nil { + w.drop() + } + }() + + for src.Next() { + err = w.append(src.Key(), src.Value()) + if err != nil { + return + } + } + err = src.Error() + if err != nil { + return + } + + n = w.tw.EntriesLen() + f, err = w.finish() + return +} + +// Opens table. It returns a cache handle, which should +// be released after use. +func (t *tOps) open(f *tFile) (ch cache.Handle, err error) { + num := f.file.Num() + ch = t.cacheNS.Get(num, func() (charge int, value interface{}) { + var r storage.Reader + r, err = f.file.Open() + if err != nil { + return 0, nil + } + + var bcacheNS cache.Namespace + if bc := t.s.o.GetBlockCache(); bc != nil { + bcacheNS = bc.GetNamespace(num) + } + return 1, table.NewReader(r, int64(f.size), bcacheNS, t.bpool, t.s.o) + }) + if ch == nil && err == nil { + err = ErrClosed + } + return +} + +// Finds key/value pair whose key is greater than or equal to the +// given key. +func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { + ch, err := t.open(f) + if err != nil { + return nil, nil, err + } + defer ch.Release() + return ch.Value().(*table.Reader).Find(key, ro) +} + +// Returns approximate offset of the given key. +func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) { + ch, err := t.open(f) + if err != nil { + return + } + defer ch.Release() + offset_, err := ch.Value().(*table.Reader).OffsetOf(key) + return uint64(offset_), err +} + +// Creates an iterator from the given table. 
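+// The returned iterator retains the table's cache handle and releases it when
+// the iterator itself is released.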
+func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + ch, err := t.open(f) + if err != nil { + return iterator.NewEmptyIterator(err) + } + iter := ch.Value().(*table.Reader).NewIterator(slice, ro) + iter.SetReleaser(ch) + return iter +} + +// Removes table from persistent storage. It waits until +// no one use the the table. +func (t *tOps) remove(f *tFile) { + num := f.file.Num() + t.cacheNS.Delete(num, func(exist, pending bool) { + if !pending { + if err := f.file.Remove(); err != nil { + t.s.logf("table@remove removing @%d %q", num, err) + } else { + t.s.logf("table@remove removed @%d", num) + } + if bc := t.s.o.GetBlockCache(); bc != nil { + bc.ZapNamespace(num) + } + } + }) +} + +// Closes the table ops instance. It will close all tables, +// regadless still used or not. +func (t *tOps) close() { + t.cache.Zap() + t.bpool.Close() +} + +// Creates new initialized table ops instance. +func newTableOps(s *session, cacheCap int) *tOps { + c := cache.NewLRUCache(cacheCap) + return &tOps{ + s: s, + cache: c, + cacheNS: c.GetNamespace(0), + bpool: util.NewBufferPool(s.o.GetBlockSize() + 5), + } +} + +// tWriter wraps the table writer. It keep track of file descriptor +// and added key range. +type tWriter struct { + t *tOps + + file storage.File + w storage.Writer + tw *table.Writer + + first, last []byte +} + +// Append key/value pair to the table. +func (w *tWriter) append(key, value []byte) error { + if w.first == nil { + w.first = append([]byte{}, key...) + } + w.last = append(w.last[:0], key...) + return w.tw.Append(key, value) +} + +// Returns true if the table is empty. +func (w *tWriter) empty() bool { + return w.first == nil +} + +// Finalizes the table and returns table file. +func (w *tWriter) finish() (f *tFile, err error) { + err = w.tw.Close() + if err != nil { + return + } + err = w.w.Sync() + if err != nil { + w.w.Close() + return + } + w.w.Close() + f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last)) + return +} + +// Drops the table. +func (w *tWriter) drop() { + w.w.Close() + w.file.Remove() + w.t.s.reuseFileNum(w.file.Num()) + w.w = nil + w.file = nil + w.tw = nil + w.first = nil + w.last = nil +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go new file mode 100644 index 000000000..790e1443a --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go @@ -0,0 +1,131 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package table + +import ( + "encoding/binary" + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +func (b *block) TestNewIterator(slice *util.Range) iterator.Iterator { + return b.newIterator(slice, false, nil) +} + +var _ = testutil.Defer(func() { + Describe("Block", func() { + Build := func(kv *testutil.KeyValue, restartInterval int) *block { + // Building the block. 
+ bw := &blockWriter{ + restartInterval: restartInterval, + scratch: make([]byte, 30), + } + kv.Iterate(func(i int, key, value []byte) { + bw.append(key, value) + }) + bw.finish() + + // Opening the block. + data := bw.buf.Bytes() + restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) + return &block{ + tr: &Reader{cmp: comparer.DefaultComparer}, + data: data, + restartsLen: restartsLen, + restartsOffset: len(data) - (restartsLen+1)*4, + } + } + + Describe("read test", func() { + for restartInterval := 1; restartInterval <= 5; restartInterval++ { + Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { + kv := &testutil.KeyValue{} + Text := func() string { + return fmt.Sprintf("and %d keys", kv.Len()) + } + + Test := func() { + // Make block. + br := Build(kv, restartInterval) + // Do testing. + testutil.KeyValueTesting(nil, br, kv.Clone()) + } + + Describe(Text(), Test) + + kv.PutString("", "empty") + Describe(Text(), Test) + + kv.PutString("a1", "foo") + Describe(Text(), Test) + + kv.PutString("a2", "v") + Describe(Text(), Test) + + kv.PutString("a3qqwrkks", "hello") + Describe(Text(), Test) + + kv.PutString("a4", "bar") + Describe(Text(), Test) + + kv.PutString("a5111111", "v5") + kv.PutString("a6", "") + kv.PutString("a7", "v7") + kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8") + kv.PutString("b", "v9") + kv.PutString("c9", "v9") + kv.PutString("c91", "v9") + kv.PutString("d0", "v9") + Describe(Text(), Test) + }) + } + }) + + Describe("out-of-bound slice test", func() { + kv := &testutil.KeyValue{} + kv.PutString("k1", "v1") + kv.PutString("k2", "v2") + kv.PutString("k3abcdefgg", "v3") + kv.PutString("k4", "v4") + kv.PutString("k5", "v5") + for restartInterval := 1; restartInterval <= 5; restartInterval++ { + Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { + // Make block. + br := Build(kv, restartInterval) + + Test := func(r *util.Range) func(done Done) { + return func(done Done) { + iter := br.newIterator(r, false, nil) + Expect(iter.Error()).ShouldNot(HaveOccurred()) + + t := testutil.IteratorTesting{ + KeyValue: kv.Clone(), + Iter: iter, + } + + testutil.DoIteratorTesting(&t) + done <- true + } + } + + It("Should do iterations and seeks correctly #0", + Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0) + + It("Should do iterations and seeks correctly #1", + Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0) + }) + } + }) + }) +}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go new file mode 100644 index 000000000..fc4c3ed26 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go @@ -0,0 +1,934 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package table + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "sort" + "strings" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + ErrNotFound = util.ErrNotFound + ErrIterReleased = errors.New("leveldb/table: iterator released") +) + +func max(x, y int) int { + if x > y { + return x + } + return y +} + +type block struct { + tr *Reader + data []byte + restartsLen int + restartsOffset int + // Whether checksum is verified and valid. + checksum bool +} + +func (b *block) seek(rstart, rlimit int, key []byte) (index, offset int, err error) { + index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { + offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) + offset += 1 // shared always zero, since this is a restart point + v1, n1 := binary.Uvarint(b.data[offset:]) // key length + _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length + m := offset + n1 + n2 + return b.tr.cmp.Compare(b.data[m:m+int(v1)], key) > 0 + }) + rstart - 1 + if index < rstart { + // The smallest key is greater-than key sought. + index = rstart + } + offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) + return +} + +func (b *block) restartIndex(rstart, rlimit, offset int) int { + return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { + return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset + }) + rstart - 1 +} + +func (b *block) restartOffset(index int) int { + return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) +} + +func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) { + if offset >= b.restartsOffset { + if offset != b.restartsOffset { + err = errors.New("leveldb/table: Reader: BlockEntry: invalid block (block entries offset not aligned)") + } + return + } + v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length + v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length + v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length + m := n0 + n1 + n2 + n = m + int(v1) + int(v2) + if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset { + err = errors.New("leveldb/table: Reader: invalid block (block entries corrupted)") + return + } + key = b.data[offset+m : offset+m+int(v1)] + value = b.data[offset+m+int(v1) : offset+n] + nShared = int(v0) + return +} + +func (b *block) newIterator(slice *util.Range, inclLimit bool, cache util.Releaser) *blockIter { + bi := &blockIter{ + block: b, + cache: cache, + // Valid key should never be nil. 
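+		// An empty but non-nil slice also serves as the reusable buffer that
+		// Next and Prev append decoded keys into.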
+ key: make([]byte, 0), + dir: dirSOI, + riStart: 0, + riLimit: b.restartsLen, + offsetStart: 0, + offsetRealStart: 0, + offsetLimit: b.restartsOffset, + } + if slice != nil { + if slice.Start != nil { + if bi.Seek(slice.Start) { + bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset) + bi.offsetStart = b.restartOffset(bi.riStart) + bi.offsetRealStart = bi.prevOffset + } else { + bi.riStart = b.restartsLen + bi.offsetStart = b.restartsOffset + bi.offsetRealStart = b.restartsOffset + } + } + if slice.Limit != nil { + if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) { + bi.offsetLimit = bi.prevOffset + bi.riLimit = bi.restartIndex + 1 + } + } + bi.reset() + if bi.offsetStart > bi.offsetLimit { + bi.sErr(errors.New("leveldb/table: Reader: invalid slice range")) + } + } + return bi +} + +func (b *block) Release() { + if b.tr.bpool != nil { + b.tr.bpool.Put(b.data) + } + b.tr = nil + b.data = nil +} + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +type blockIter struct { + block *block + cache, releaser util.Releaser + key, value []byte + offset int + // Previous offset, only filled by Next. + prevOffset int + prevNode []int + prevKeys []byte + restartIndex int + // Iterator direction. + dir dir + // Restart index slice range. + riStart int + riLimit int + // Offset slice range. + offsetStart int + offsetRealStart int + offsetLimit int + // Error. + err error +} + +func (i *blockIter) sErr(err error) { + i.err = err + i.key = nil + i.value = nil + i.prevNode = nil + i.prevKeys = nil +} + +func (i *blockIter) reset() { + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.restartIndex = i.riStart + i.offset = i.offsetStart + i.dir = dirSOI + i.key = i.key[:0] + i.value = nil +} + +func (i *blockIter) isFirst() bool { + switch i.dir { + case dirForward: + return i.prevOffset == i.offsetRealStart + case dirBackward: + return len(i.prevNode) == 1 && i.restartIndex == i.riStart + } + return false +} + +func (i *blockIter) isLast() bool { + switch i.dir { + case dirForward, dirBackward: + return i.offset == i.offsetLimit + } + return false +} + +func (i *blockIter) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.dir = dirSOI + return i.Next() +} + +func (i *blockIter) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.dir = dirEOI + return i.Prev() +} + +func (i *blockIter) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + ri, offset, err := i.block.seek(i.riStart, i.riLimit, key) + if err != nil { + i.sErr(err) + return false + } + i.restartIndex = ri + i.offset = max(i.offsetStart, offset) + if i.dir == dirSOI || i.dir == dirEOI { + i.dir = dirForward + } + for i.Next() { + if i.block.tr.cmp.Compare(i.key, key) >= 0 { + return true + } + } + return false +} + +func (i *blockIter) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirSOI { + i.restartIndex = i.riStart + i.offset = i.offsetStart + } else if i.dir == 
dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + for i.offset < i.offsetRealStart { + key, value, nShared, n, err := i.block.entry(i.offset) + if err != nil { + i.sErr(err) + return false + } + if n == 0 { + i.dir = dirEOI + return false + } + i.key = append(i.key[:nShared], key...) + i.value = value + i.offset += n + } + if i.offset >= i.offsetLimit { + i.dir = dirEOI + if i.offset != i.offsetLimit { + i.sErr(errors.New("leveldb/table: Reader: Next: invalid block (block entries offset not aligned)")) + } + return false + } + key, value, nShared, n, err := i.block.entry(i.offset) + if err != nil { + i.sErr(err) + return false + } + if n == 0 { + i.dir = dirEOI + return false + } + i.key = append(i.key[:nShared], key...) + i.value = value + i.prevOffset = i.offset + i.offset += n + i.dir = dirForward + return true +} + +func (i *blockIter) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + var ri int + if i.dir == dirForward { + // Change direction. + i.offset = i.prevOffset + if i.offset == i.offsetRealStart { + i.dir = dirSOI + return false + } + ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset) + i.dir = dirBackward + } else if i.dir == dirEOI { + // At the end of iterator. + i.restartIndex = i.riLimit + i.offset = i.offsetLimit + if i.offset == i.offsetRealStart { + i.dir = dirSOI + return false + } + ri = i.riLimit - 1 + i.dir = dirBackward + } else if len(i.prevNode) == 1 { + // This is the end of a restart range. + i.offset = i.prevNode[0] + i.prevNode = i.prevNode[:0] + if i.restartIndex == i.riStart { + i.dir = dirSOI + return false + } + i.restartIndex-- + ri = i.restartIndex + } else { + // In the middle of restart range, get from cache. + n := len(i.prevNode) - 3 + node := i.prevNode[n:] + i.prevNode = i.prevNode[:n] + // Get the key. + ko := node[0] + i.key = append(i.key[:0], i.prevKeys[ko:]...) + i.prevKeys = i.prevKeys[:ko] + // Get the value. + vo := node[1] + vl := vo + node[2] + i.value = i.block.data[vo:vl] + i.offset = vl + return true + } + // Build entries cache. + i.key = i.key[:0] + i.value = nil + offset := i.block.restartOffset(ri) + if offset == i.offset { + ri -= 1 + if ri < 0 { + i.dir = dirSOI + return false + } + offset = i.block.restartOffset(ri) + } + i.prevNode = append(i.prevNode, offset) + for { + key, value, nShared, n, err := i.block.entry(offset) + if err != nil { + i.sErr(err) + return false + } + if offset >= i.offsetRealStart { + if i.value != nil { + // Appends 3 variables: + // 1. Previous keys offset + // 2. Value offset in the data block + // 3. Value length + i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value)) + i.prevKeys = append(i.prevKeys, i.key...) + } + i.value = value + } + i.key = append(i.key[:nShared], key...) + offset += n + // Stop if target offset reached. 
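+		// (an overshoot here means the entry boundaries do not line up with
+		// i.offset, i.e. the block is corrupted)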
+ if offset >= i.offset { + if offset != i.offset { + i.sErr(errors.New("leveldb/table: Reader: Prev: invalid block (block entries offset not aligned)")) + return false + } + + break + } + } + i.restartIndex = ri + i.offset = offset + return true +} + +func (i *blockIter) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.key +} + +func (i *blockIter) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.value +} + +func (i *blockIter) Release() { + if i.dir > dirReleased { + i.block = nil + i.prevNode = nil + i.prevKeys = nil + i.key = nil + i.value = nil + i.dir = dirReleased + if i.cache != nil { + i.cache.Release() + i.cache = nil + } + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + } +} + +func (i *blockIter) SetReleaser(releaser util.Releaser) { + if i.dir > dirReleased { + i.releaser = releaser + } +} + +func (i *blockIter) Valid() bool { + return i.err == nil && (i.dir == dirBackward || i.dir == dirForward) +} + +func (i *blockIter) Error() error { + return i.err +} + +type filterBlock struct { + tr *Reader + data []byte + oOffset int + baseLg uint + filtersNum int +} + +func (b *filterBlock) contains(offset uint64, key []byte) bool { + i := int(offset >> b.baseLg) + if i < b.filtersNum { + o := b.data[b.oOffset+i*4:] + n := int(binary.LittleEndian.Uint32(o)) + m := int(binary.LittleEndian.Uint32(o[4:])) + if n < m && m <= b.oOffset { + return b.tr.filter.Contains(b.data[n:m], key) + } else if n == m { + return false + } + } + return true +} + +func (b *filterBlock) Release() { + if b.tr.bpool != nil { + b.tr.bpool.Put(b.data) + } + b.tr = nil + b.data = nil +} + +type indexIter struct { + *blockIter + slice *util.Range + // Options + checksum bool + fillCache bool +} + +func (i *indexIter) Get() iterator.Iterator { + value := i.Value() + if value == nil { + return nil + } + dataBH, n := decodeBlockHandle(value) + if n == 0 { + return iterator.NewEmptyIterator(errors.New("leveldb/table: Reader: invalid table (bad data block handle)")) + } + var slice *util.Range + if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) { + slice = i.slice + } + return i.blockIter.block.tr.getDataIter(dataBH, slice, i.checksum, i.fillCache) +} + +// Reader is a table reader. 
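+//
+// A Reader is created by NewReader; as noted there, the returned instance
+// is goroutine-safe.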
+type Reader struct { + reader io.ReaderAt + cache cache.Namespace + err error + bpool *util.BufferPool + // Options + cmp comparer.Comparer + filter filter.Filter + checksum bool + strictIter bool + + dataEnd int64 + indexBH, filterBH blockHandle +} + +func verifyChecksum(data []byte) bool { + n := len(data) - 4 + checksum0 := binary.LittleEndian.Uint32(data[n:]) + checksum1 := util.NewCRC(data[:n]).Value() + return checksum0 == checksum1 +} + +func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) { + data := r.bpool.Get(int(bh.length + blockTrailerLen)) + if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { + return nil, err + } + if checksum || r.checksum { + if !verifyChecksum(data) { + r.bpool.Put(data) + return nil, errors.New("leveldb/table: Reader: invalid block (checksum mismatch)") + } + } + switch data[bh.length] { + case blockTypeNoCompression: + data = data[:bh.length] + case blockTypeSnappyCompression: + decLen, err := snappy.DecodedLen(data[:bh.length]) + if err != nil { + return nil, err + } + tmp := data + data, err = snappy.Decode(r.bpool.Get(decLen), tmp[:bh.length]) + r.bpool.Put(tmp) + if err != nil { + return nil, err + } + default: + r.bpool.Put(data) + return nil, fmt.Errorf("leveldb/table: Reader: unknown block compression type: %d", data[bh.length]) + } + return data, nil +} + +func (r *Reader) readBlock(bh blockHandle, checksum bool) (*block, error) { + data, err := r.readRawBlock(bh, checksum) + if err != nil { + return nil, err + } + restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) + b := &block{ + tr: r, + data: data, + restartsLen: restartsLen, + restartsOffset: len(data) - (restartsLen+1)*4, + checksum: checksum || r.checksum, + } + return b, nil +} + +func (r *Reader) readBlockCached(bh blockHandle, checksum, fillCache bool) (*block, util.Releaser, error) { + if r.cache != nil { + var err error + ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) { + if !fillCache { + return 0, nil + } + var b *block + b, err = r.readBlock(bh, checksum) + if err != nil { + return 0, nil + } + return cap(b.data), b + }) + if ch != nil { + b, ok := ch.Value().(*block) + if !ok { + ch.Release() + return nil, nil, errors.New("leveldb/table: Reader: inconsistent block type") + } + if !b.checksum && (r.checksum || checksum) { + if !verifyChecksum(b.data) { + ch.Release() + return nil, nil, errors.New("leveldb/table: Reader: invalid block (checksum mismatch)") + } + b.checksum = true + } + return b, ch, err + } else if err != nil { + return nil, nil, err + } + } + + b, err := r.readBlock(bh, checksum) + return b, b, err +} + +func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) { + data, err := r.readRawBlock(bh, true) + if err != nil { + return nil, err + } + n := len(data) + if n < 5 { + return nil, errors.New("leveldb/table: Reader: invalid filter block (too short)") + } + m := n - 5 + oOffset := int(binary.LittleEndian.Uint32(data[m:])) + if oOffset > m { + return nil, errors.New("leveldb/table: Reader: invalid filter block (invalid offset)") + } + b := &filterBlock{ + tr: r, + data: data, + oOffset: oOffset, + baseLg: uint(data[n-1]), + filtersNum: (m - oOffset) / 4, + } + return b, nil +} + +func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) { + if r.cache != nil { + var err error + ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) { + if !fillCache { + return 0, nil + } + var b *filterBlock + b, 
err = r.readFilterBlock(bh) + if err != nil { + return 0, nil + } + return cap(b.data), b + }) + if ch != nil { + b, ok := ch.Value().(*filterBlock) + if !ok { + ch.Release() + return nil, nil, errors.New("leveldb/table: Reader: inconsistent block type") + } + return b, ch, err + } else if err != nil { + return nil, nil, err + } + } + + b, err := r.readFilterBlock(bh) + return b, b, err +} + +func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator { + b, rel, err := r.readBlockCached(dataBH, checksum, fillCache) + if err != nil { + return iterator.NewEmptyIterator(err) + } + return b.newIterator(slice, false, rel) +} + +// NewIterator creates an iterator from the table. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// table. And a nil Range.Limit is treated as a key after all keys in +// the table. +// +// The returned iterator is not goroutine-safe and should be released +// when not used. +// +// Also read Iterator documentation of the leveldb/iterator package. +func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + if r.err != nil { + return iterator.NewEmptyIterator(r.err) + } + + fillCache := !ro.GetDontFillCache() + b, rel, err := r.readBlockCached(r.indexBH, true, fillCache) + if err != nil { + return iterator.NewEmptyIterator(err) + } + index := &indexIter{ + blockIter: b.newIterator(slice, true, rel), + slice: slice, + checksum: ro.GetStrict(opt.StrictBlockChecksum), + fillCache: !ro.GetDontFillCache(), + } + return iterator.NewIndexedIterator(index, r.strictIter || ro.GetStrict(opt.StrictIterator), false) +} + +// Find finds key/value pair whose key is greater than or equal to the +// given key. It returns ErrNotFound if the table doesn't contain +// such pair. +// +// The caller should not modify the contents of the returned slice, but +// it is safe to modify the contents of the argument after Find returns. +func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err error) { + if r.err != nil { + err = r.err + return + } + + indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true) + if err != nil { + return + } + defer rel.Release() + + index := indexBlock.newIterator(nil, true, nil) + defer index.Release() + if !index.Seek(key) { + err = index.Error() + if err == nil { + err = ErrNotFound + } + return + } + dataBH, n := decodeBlockHandle(index.Value()) + if n == 0 { + err = errors.New("leveldb/table: Reader: invalid table (bad data block handle)") + return + } + if r.filter != nil { + filterBlock, rel, ferr := r.readFilterBlockCached(r.filterBH, true) + if ferr == nil { + if !filterBlock.contains(dataBH.offset, key) { + rel.Release() + return nil, nil, ErrNotFound + } + rel.Release() + } + } + data := r.getDataIter(dataBH, nil, ro.GetStrict(opt.StrictBlockChecksum), !ro.GetDontFillCache()) + defer data.Release() + if !data.Seek(key) { + err = data.Error() + if err == nil { + err = ErrNotFound + } + return + } + // Don't use block buffer, no need to copy the buffer. + rkey = data.Key() + // Use block buffer, and since the buffer will be recycled, the buffer + // need to be copied. + value = append([]byte{}, data.Value()...) + return +} + +// Get gets the value for the given key. It returns errors.ErrNotFound +// if the table does not contain the key. 
+// +// The caller should not modify the contents of the returned slice, but +// it is safe to modify the contents of the argument after Get returns. +func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { + if r.err != nil { + err = r.err + return + } + + rkey, value, err := r.Find(key, ro) + if err == nil && r.cmp.Compare(rkey, key) != 0 { + value = nil + err = ErrNotFound + } + return +} + +// OffsetOf returns approximate offset for the given key. +// +// It is safe to modify the contents of the argument after Get returns. +func (r *Reader) OffsetOf(key []byte) (offset int64, err error) { + if r.err != nil { + err = r.err + return + } + + indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true) + if err != nil { + return + } + defer rel.Release() + + index := indexBlock.newIterator(nil, true, nil) + defer index.Release() + if index.Seek(key) { + dataBH, n := decodeBlockHandle(index.Value()) + if n == 0 { + err = errors.New("leveldb/table: Reader: invalid table (bad data block handle)") + return + } + offset = int64(dataBH.offset) + return + } + err = index.Error() + if err == nil { + offset = r.dataEnd + } + return +} + +// Release implements util.Releaser. +// It also close the file if it is an io.Closer. +func (r *Reader) Release() { + if closer, ok := r.reader.(io.Closer); ok { + closer.Close() + } + r.reader = nil + r.cache = nil + r.bpool = nil +} + +// NewReader creates a new initialized table reader for the file. +// The cache and bpool is optional and can be nil. +// +// The returned table reader instance is goroutine-safe. +func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, bpool *util.BufferPool, o *opt.Options) *Reader { + if bpool == nil { + bpool = util.NewBufferPool(o.GetBlockSize() + blockTrailerLen) + } + r := &Reader{ + reader: f, + cache: cache, + bpool: bpool, + cmp: o.GetComparer(), + checksum: o.GetStrict(opt.StrictBlockChecksum), + strictIter: o.GetStrict(opt.StrictIterator), + } + if f == nil { + r.err = errors.New("leveldb/table: Reader: nil file") + return r + } + if size < footerLen { + r.err = errors.New("leveldb/table: Reader: invalid table (file size is too small)") + return r + } + var footer [footerLen]byte + if _, err := r.reader.ReadAt(footer[:], size-footerLen); err != nil && err != io.EOF { + r.err = fmt.Errorf("leveldb/table: Reader: invalid table (could not read footer): %v", err) + } + if string(footer[footerLen-len(magic):footerLen]) != magic { + r.err = errors.New("leveldb/table: Reader: invalid table (bad magic number)") + return r + } + // Decode the metaindex block handle. + metaBH, n := decodeBlockHandle(footer[:]) + if n == 0 { + r.err = errors.New("leveldb/table: Reader: invalid table (bad metaindex block handle)") + return r + } + // Decode the index block handle. + r.indexBH, n = decodeBlockHandle(footer[n:]) + if n == 0 { + r.err = errors.New("leveldb/table: Reader: invalid table (bad index block handle)") + return r + } + // Read metaindex block. + metaBlock, err := r.readBlock(metaBH, true) + if err != nil { + r.err = err + return r + } + // Set data end. 
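+	// Data blocks end where the metaindex block begins; if a filter block is
+	// found below, dataEnd is moved back to the filter block's offset.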
+ r.dataEnd = int64(metaBH.offset) + metaIter := metaBlock.newIterator(nil, false, nil) + for metaIter.Next() { + key := string(metaIter.Key()) + if !strings.HasPrefix(key, "filter.") { + continue + } + fn := key[7:] + if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn { + r.filter = f0 + } else { + for _, f0 := range o.GetAltFilters() { + if f0.Name() == fn { + r.filter = f0 + break + } + } + } + if r.filter != nil { + filterBH, n := decodeBlockHandle(metaIter.Value()) + if n == 0 { + continue + } + r.filterBH = filterBH + // Update data end. + r.dataEnd = int64(filterBH.offset) + break + } + } + metaIter.Release() + metaBlock.Release() + return r +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go new file mode 100644 index 000000000..c0ac70d9e --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go @@ -0,0 +1,177 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package table allows read and write sorted key/value. +package table + +import ( + "encoding/binary" +) + +/* +Table: + +Table is consist of one or more data blocks, an optional filter block +a metaindex block, an index block and a table footer. Metaindex block +is a special block used to keep parameters of the table, such as filter +block name and its block handle. Index block is a special block used to +keep record of data blocks offset and length, index block use one as +restart interval. The key used by index block are the last key of preceding +block, shorter separator of adjacent blocks or shorter successor of the +last key of the last block. Filter block is an optional block contains +sequence of filter data generated by a filter generator. + +Table data structure: + + optional + / + +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+ + | data block 1 | ... | data block n | filter block | metaindex block | index block | footer | + +--------------+--------------+--------------+--------------+-----------------+-------------+--------+ + + Each block followed by a 5-bytes trailer contains compression type and checksum. + +Table block trailer: + + +---------------------------+-------------------+ + | compression type (1-byte) | checksum (4-byte) | + +---------------------------+-------------------+ + + The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression + type also included in the checksum. + +Table footer: + + +------------------- 40-bytes -------------------+ + / \ + +------------------------+--------------------+------+-----------------+ + | metaindex block handle / index block handle / ---- | magic (8-bytes) | + +------------------------+--------------------+------+-----------------+ + + The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/". + +NOTE: All fixed-length integer are little-endian. +*/ + +/* +Block: + +Block is consist of one or more key/value entries and a block trailer. +Block entry shares key prefix with its preceding key until a restart +point reached. A block should contains at least one restart point. +First restart point are always zero. 
+ +Block data structure: + + + restart point + restart point (depends on restart interval) + / / + +---------------+---------------+---------------+---------------+---------+ + | block entry 1 | block entry 2 | ... | block entry n | trailer | + +---------------+---------------+---------------+---------------+---------+ + +Key/value entry: + + +---- key len ----+ + / \ + +-------+---------+-----------+---------+--------------------+--------------+----------------+ + | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) | + +-----------------+---------------------+--------------------+--------------+----------------+ + + Block entry shares key prefix with its preceding key: + Conditions: + restart_interval=2 + entry one : key=deck,value=v1 + entry two : key=dock,value=v2 + entry three: key=duck,value=v3 + The entries will be encoded as follow: + + + restart point (offset=0) + restart point (offset=16) + / / + +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ + | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" | + +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ + \ / \ / \ / + +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+ + + The block trailer will contains two restart points: + + +------------+-----------+--------+ + | 0 | 16 | 2 | + +------------+-----------+---+----+ + \ / \ + +-- restart points --+ + restart points length + +Block trailer: + + +-- 4-bytes --+ + / \ + +-----------------+-----------------+-----------------+------------------------------+ + | restart point 1 | .... | restart point n | restart points len (4-bytes) | + +-----------------+-----------------+-----------------+------------------------------+ + + +NOTE: All fixed-length integer are little-endian. +*/ + +/* +Filter block: + +Filter block consist of one or more filter data and a filter block trailer. +The trailer contains filter data offsets, a trailer offset and a 1-byte base Lg. + +Filter block data structure: + + + offset 1 + offset 2 + offset n + trailer offset + / / / / + +---------------+---------------+---------------+---------+ + | filter data 1 | ... | filter data n | trailer | + +---------------+---------------+---------------+---------+ + +Filter block trailer: + + +- 4-bytes -+ + / \ + +---------------+---------------+---------------+-------------------------+------------------+ + | offset 1 | .... | offset n | filter offset (4-bytes) | base Lg (1-byte) | + +-------------- +---------------+---------------+-------------------------+------------------+ + + +NOTE: All fixed-length integer are little-endian. +*/ + +const ( + blockTrailerLen = 5 + footerLen = 48 + + magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb" + + // The block type gives the per-block compression format. + // These constants are part of the file format and should not be changed. 
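+	// The type is stored as the first byte of each block trailer (see
+	// writeBlock in writer.go and readRawBlock in reader.go).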
+ blockTypeNoCompression = 0 + blockTypeSnappyCompression = 1 + + // Generate new filter every 2KB of data + filterBaseLg = 11 + filterBase = 1 << filterBaseLg +) + +type blockHandle struct { + offset, length uint64 +} + +func decodeBlockHandle(src []byte) (blockHandle, int) { + offset, n := binary.Uvarint(src) + length, m := binary.Uvarint(src[n:]) + if n == 0 || m == 0 { + return blockHandle{}, 0 + } + return blockHandle{offset, length}, n + m +} + +func encodeBlockHandle(dst []byte, b blockHandle) int { + n := binary.PutUvarint(dst, b.offset) + m := binary.PutUvarint(dst[n:], b.length) + return n + m +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go new file mode 100644 index 000000000..6ab892d54 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go @@ -0,0 +1,17 @@ +package table + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" +) + +func TestTable(t *testing.T) { + testutil.RunDefer() + + RegisterFailHandler(Fail) + RunSpecs(t, "Table Suite") +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go new file mode 100644 index 000000000..4aad03019 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go @@ -0,0 +1,121 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package table + +import ( + "bytes" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +type tableWrapper struct { + *Reader +} + +func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) { + return t.Reader.Find(key, nil) +} + +func (t tableWrapper) TestGet(key []byte) (value []byte, err error) { + return t.Reader.Get(key, nil) +} + +func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator { + return t.Reader.NewIterator(slice, nil) +} + +var _ = testutil.Defer(func() { + Describe("Table", func() { + Describe("approximate offset test", func() { + var ( + buf = &bytes.Buffer{} + o = &opt.Options{ + BlockSize: 1024, + Compression: opt.NoCompression, + } + ) + + // Building the table. 
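+			// k03..k05 and k07 carry large values, so the table spans many 1 KiB
+			// blocks and each key gets a clearly distinct approximate offset.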
+ tw := NewWriter(buf, o) + tw.Append([]byte("k01"), []byte("hello")) + tw.Append([]byte("k02"), []byte("hello2")) + tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000)) + tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000)) + tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000)) + tw.Append([]byte("k06"), []byte("hello3")) + tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000)) + err := tw.Close() + + It("Should be able to approximate offset of a key correctly", func() { + Expect(err).ShouldNot(HaveOccurred()) + + tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, o) + CheckOffset := func(key string, expect, threshold int) { + offset, err := tr.OffsetOf([]byte(key)) + Expect(err).ShouldNot(HaveOccurred()) + Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key) + } + + CheckOffset("k0", 0, 0) + CheckOffset("k01a", 0, 0) + CheckOffset("k02", 0, 0) + CheckOffset("k03", 0, 0) + CheckOffset("k04", 10000, 1000) + CheckOffset("k04a", 210000, 1000) + CheckOffset("k05", 210000, 1000) + CheckOffset("k06", 510000, 1000) + CheckOffset("k07", 510000, 1000) + CheckOffset("xyz", 610000, 2000) + }) + }) + + Describe("read test", func() { + Build := func(kv testutil.KeyValue) testutil.DB { + o := &opt.Options{ + BlockSize: 512, + BlockRestartInterval: 3, + } + buf := &bytes.Buffer{} + + // Building the table. + tw := NewWriter(buf, o) + kv.Iterate(func(i int, key, value []byte) { + tw.Append(key, value) + }) + tw.Close() + + // Opening the table. + tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, o) + return tableWrapper{tr} + } + Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() { + return func() { + db := Build(*kv) + if body != nil { + body(db.(tableWrapper).Reader) + } + testutil.KeyValueTesting(nil, db, *kv) + } + } + + testutil.AllKeyValueTesting(nil, Build) + Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) { + It("should have correct blocks number", func() { + indexBlock, err := r.readBlock(r.indexBH, true) + Expect(err).To(BeNil()) + Expect(indexBlock.restartsLen).Should(Equal(9)) + }) + })) + }) + }) +}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go new file mode 100644 index 000000000..9dea5b87c --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go @@ -0,0 +1,379 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
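+
+// writer.go implements the write side of the format documented in
+// table.go: block building, filter generation and the Writer type.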
+ +package table + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +func sharedPrefixLen(a, b []byte) int { + i, n := 0, len(a) + if n > len(b) { + n = len(b) + } + for i < n && a[i] == b[i] { + i++ + } + return i +} + +type blockWriter struct { + restartInterval int + buf util.Buffer + nEntries int + prevKey []byte + restarts []uint32 + scratch []byte +} + +func (w *blockWriter) append(key, value []byte) { + nShared := 0 + if w.nEntries%w.restartInterval == 0 { + w.restarts = append(w.restarts, uint32(w.buf.Len())) + } else { + nShared = sharedPrefixLen(w.prevKey, key) + } + n := binary.PutUvarint(w.scratch[0:], uint64(nShared)) + n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared)) + n += binary.PutUvarint(w.scratch[n:], uint64(len(value))) + w.buf.Write(w.scratch[:n]) + w.buf.Write(key[nShared:]) + w.buf.Write(value) + w.prevKey = append(w.prevKey[:0], key...) + w.nEntries++ +} + +func (w *blockWriter) finish() { + // Write restarts entry. + if w.nEntries == 0 { + // Must have at least one restart entry. + w.restarts = append(w.restarts, 0) + } + w.restarts = append(w.restarts, uint32(len(w.restarts))) + for _, x := range w.restarts { + buf4 := w.buf.Alloc(4) + binary.LittleEndian.PutUint32(buf4, x) + } +} + +func (w *blockWriter) reset() { + w.buf.Reset() + w.nEntries = 0 + w.restarts = w.restarts[:0] +} + +func (w *blockWriter) bytesLen() int { + restartsLen := len(w.restarts) + if restartsLen == 0 { + restartsLen = 1 + } + return w.buf.Len() + 4*restartsLen + 4 +} + +type filterWriter struct { + generator filter.FilterGenerator + buf util.Buffer + nKeys int + offsets []uint32 +} + +func (w *filterWriter) add(key []byte) { + if w.generator == nil { + return + } + w.generator.Add(key) + w.nKeys++ +} + +func (w *filterWriter) flush(offset uint64) { + if w.generator == nil { + return + } + for x := int(offset / filterBase); x > len(w.offsets); { + w.generate() + } +} + +func (w *filterWriter) finish() { + if w.generator == nil { + return + } + // Generate last keys. + + if w.nKeys > 0 { + w.generate() + } + w.offsets = append(w.offsets, uint32(w.buf.Len())) + for _, x := range w.offsets { + buf4 := w.buf.Alloc(4) + binary.LittleEndian.PutUint32(buf4, x) + } + w.buf.WriteByte(filterBaseLg) +} + +func (w *filterWriter) generate() { + // Record offset. + w.offsets = append(w.offsets, uint32(w.buf.Len())) + // Generate filters. + if w.nKeys > 0 { + w.generator.Generate(&w.buf) + w.nKeys = 0 + } +} + +// Writer is a table writer. +type Writer struct { + writer io.Writer + err error + // Options + cmp comparer.Comparer + filter filter.Filter + compression opt.Compression + blockSize int + + dataBlock blockWriter + indexBlock blockWriter + filterBlock filterWriter + pendingBH blockHandle + offset uint64 + nEntries int + // Scratch allocated enough for 5 uvarint. Block writer should not use + // first 20-bytes since it will be used to encode block handle, which + // then passed to the block writer itself. 
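+	// A block handle is two uvarints of at most 10 bytes each, hence 20.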
+ scratch [50]byte + comparerScratch []byte + compressionScratch []byte +} + +func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) { + // Compress the buffer if necessary. + var b []byte + if compression == opt.SnappyCompression { + // Allocate scratch enough for compression and block trailer. + if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n { + w.compressionScratch = make([]byte, n) + } + var compressed []byte + compressed, err = snappy.Encode(w.compressionScratch, buf.Bytes()) + if err != nil { + return + } + n := len(compressed) + b = compressed[:n+blockTrailerLen] + b[n] = blockTypeSnappyCompression + } else { + tmp := buf.Alloc(blockTrailerLen) + tmp[0] = blockTypeNoCompression + b = buf.Bytes() + } + + // Calculate the checksum. + n := len(b) - 4 + checksum := util.NewCRC(b[:n]).Value() + binary.LittleEndian.PutUint32(b[n:], checksum) + + // Write the buffer to the file. + _, err = w.writer.Write(b) + if err != nil { + return + } + bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)} + w.offset += uint64(len(b)) + return +} + +func (w *Writer) flushPendingBH(key []byte) { + if w.pendingBH.length == 0 { + return + } + var separator []byte + if len(key) == 0 { + separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey) + } else { + separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key) + } + if separator == nil { + separator = w.dataBlock.prevKey + } else { + w.comparerScratch = separator + } + n := encodeBlockHandle(w.scratch[:20], w.pendingBH) + // Append the block handle to the index block. + w.indexBlock.append(separator, w.scratch[:n]) + // Reset prev key of the data block. + w.dataBlock.prevKey = w.dataBlock.prevKey[:0] + // Clear pending block handle. + w.pendingBH = blockHandle{} +} + +func (w *Writer) finishBlock() error { + w.dataBlock.finish() + bh, err := w.writeBlock(&w.dataBlock.buf, w.compression) + if err != nil { + return err + } + w.pendingBH = bh + // Reset the data block. + w.dataBlock.reset() + // Flush the filter block. + w.filterBlock.flush(w.offset) + return nil +} + +// Append appends key/value pair to the table. The keys passed must +// be in increasing order. +// +// It is safe to modify the contents of the arguments after Append returns. +func (w *Writer) Append(key, value []byte) error { + if w.err != nil { + return w.err + } + if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 { + w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key) + return w.err + } + + w.flushPendingBH(key) + // Append key/value pair to the data block. + w.dataBlock.append(key, value) + // Add key to the filter block. + w.filterBlock.add(key) + + // Finish the data block if block size target reached. + if w.dataBlock.bytesLen() >= w.blockSize { + if err := w.finishBlock(); err != nil { + w.err = err + return w.err + } + } + w.nEntries++ + return nil +} + +// BlocksLen returns number of blocks written so far. +func (w *Writer) BlocksLen() int { + n := w.indexBlock.nEntries + if w.pendingBH.length > 0 { + // Includes the pending block. + n++ + } + return n +} + +// EntriesLen returns number of entries added so far. +func (w *Writer) EntriesLen() int { + return w.nEntries +} + +// BytesLen returns number of bytes written so far. +func (w *Writer) BytesLen() int { + return int(w.offset) +} + +// Close will finalize the table. 
Calling Append is not possible +// after Close, but calling BlocksLen, EntriesLen and BytesLen +// is still possible. +func (w *Writer) Close() error { + if w.err != nil { + return w.err + } + + // Write the last data block. Or empty data block if there + // aren't any data blocks at all. + if w.dataBlock.nEntries > 0 || w.nEntries == 0 { + if err := w.finishBlock(); err != nil { + w.err = err + return w.err + } + } + w.flushPendingBH(nil) + + // Write the filter block. + var filterBH blockHandle + w.filterBlock.finish() + if buf := &w.filterBlock.buf; buf.Len() > 0 { + filterBH, w.err = w.writeBlock(buf, opt.NoCompression) + if w.err != nil { + return w.err + } + } + + // Write the metaindex block. + if filterBH.length > 0 { + key := []byte("filter." + w.filter.Name()) + n := encodeBlockHandle(w.scratch[:20], filterBH) + w.dataBlock.append(key, w.scratch[:n]) + } + w.dataBlock.finish() + metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression) + if err != nil { + w.err = err + return w.err + } + + // Write the index block. + w.indexBlock.finish() + indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression) + if err != nil { + w.err = err + return w.err + } + + // Write the table footer. + footer := w.scratch[:footerLen] + for i := range footer { + footer[i] = 0 + } + n := encodeBlockHandle(footer, metaindexBH) + encodeBlockHandle(footer[n:], indexBH) + copy(footer[footerLen-len(magic):], magic) + if _, err := w.writer.Write(footer); err != nil { + w.err = err + return w.err + } + w.offset += footerLen + + w.err = errors.New("leveldb/table: writer is closed") + return nil +} + +// NewWriter creates a new initialized table writer for the file. +// +// Table writer is not goroutine-safe. +func NewWriter(f io.Writer, o *opt.Options) *Writer { + w := &Writer{ + writer: f, + cmp: o.GetComparer(), + filter: o.GetFilter(), + compression: o.GetCompression(), + blockSize: o.GetBlockSize(), + comparerScratch: make([]byte, 0), + } + // data block + w.dataBlock.restartInterval = o.GetBlockRestartInterval() + // The first 20-bytes are used for encoding block handle. + w.dataBlock.scratch = w.scratch[20:] + // index block + w.indexBlock.restartInterval = 1 + w.indexBlock.scratch = w.scratch[20:] + // filter block + if w.filter != nil { + w.filterBlock.generator = w.filter.NewGenerator() + w.filterBlock.flush(0) + } + return w +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go new file mode 100644 index 000000000..5b6e0344e --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go @@ -0,0 +1,216 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package testutil + +import ( + "fmt" + "math/rand" + + . 
"github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +type DB interface{} + +type Put interface { + TestPut(key []byte, value []byte) error +} + +type Delete interface { + TestDelete(key []byte) error +} + +type Find interface { + TestFind(key []byte) (rkey, rvalue []byte, err error) +} + +type Get interface { + TestGet(key []byte) (value []byte, err error) +} + +type NewIterator interface { + TestNewIterator(slice *util.Range) iterator.Iterator +} + +type DBAct int + +func (a DBAct) String() string { + switch a { + case DBNone: + return "none" + case DBPut: + return "put" + case DBOverwrite: + return "overwrite" + case DBDelete: + return "delete" + case DBDeleteNA: + return "delete_na" + } + return "unknown" +} + +const ( + DBNone DBAct = iota + DBPut + DBOverwrite + DBDelete + DBDeleteNA +) + +type DBTesting struct { + Rand *rand.Rand + DB interface { + Get + Put + Delete + } + PostFn func(t *DBTesting) + Deleted, Present KeyValue + Act, LastAct DBAct + ActKey, LastActKey []byte +} + +func (t *DBTesting) post() { + if t.PostFn != nil { + t.PostFn(t) + } +} + +func (t *DBTesting) setAct(act DBAct, key []byte) { + t.LastAct, t.Act = t.Act, act + t.LastActKey, t.ActKey = t.ActKey, key +} + +func (t *DBTesting) text() string { + return fmt.Sprintf("last action was <%v> %q, <%v> %q", t.LastAct, t.LastActKey, t.Act, t.ActKey) +} + +func (t *DBTesting) Text() string { + return "DBTesting " + t.text() +} + +func (t *DBTesting) TestPresentKV(key, value []byte) { + rvalue, err := t.DB.TestGet(key) + Expect(err).ShouldNot(HaveOccurred(), "Get on key %q, %s", key, t.text()) + Expect(rvalue).Should(Equal(value), "Value for key %q, %s", key, t.text()) +} + +func (t *DBTesting) TestAllPresent() { + t.Present.IterateShuffled(t.Rand, func(i int, key, value []byte) { + t.TestPresentKV(key, value) + }) +} + +func (t *DBTesting) TestDeletedKey(key []byte) { + _, err := t.DB.TestGet(key) + Expect(err).Should(Equal(util.ErrNotFound), "Get on deleted key %q, %s", key, t.text()) +} + +func (t *DBTesting) TestAllDeleted() { + t.Deleted.IterateShuffled(t.Rand, func(i int, key, value []byte) { + t.TestDeletedKey(key) + }) +} + +func (t *DBTesting) TestAll() { + dn := t.Deleted.Len() + pn := t.Present.Len() + ShuffledIndex(t.Rand, dn+pn, 1, func(i int) { + if i >= dn { + key, value := t.Present.Index(i - dn) + t.TestPresentKV(key, value) + } else { + t.TestDeletedKey(t.Deleted.KeyAt(i)) + } + }) +} + +func (t *DBTesting) Put(key, value []byte) { + if new := t.Present.PutU(key, value); new { + t.setAct(DBPut, key) + } else { + t.setAct(DBOverwrite, key) + } + t.Deleted.Delete(key) + err := t.DB.TestPut(key, value) + Expect(err).ShouldNot(HaveOccurred(), t.Text()) + t.TestPresentKV(key, value) + t.post() +} + +func (t *DBTesting) PutRandom() bool { + if t.Deleted.Len() > 0 { + i := t.Rand.Intn(t.Deleted.Len()) + key, value := t.Deleted.Index(i) + t.Put(key, value) + return true + } + return false +} + +func (t *DBTesting) Delete(key []byte) { + if exist, value := t.Present.Delete(key); exist { + t.setAct(DBDelete, key) + t.Deleted.PutU(key, value) + } else { + t.setAct(DBDeleteNA, key) + } + err := t.DB.TestDelete(key) + Expect(err).ShouldNot(HaveOccurred(), t.Text()) + t.TestDeletedKey(key) + t.post() +} + +func (t *DBTesting) DeleteRandom() bool { + if t.Present.Len() > 0 { + i := t.Rand.Intn(t.Present.Len()) + t.Delete(t.Present.KeyAt(i)) + return 
true + } + return false +} + +func (t *DBTesting) RandomAct(round int) { + for i := 0; i < round; i++ { + if t.Rand.Int()%2 == 0 { + t.PutRandom() + } else { + t.DeleteRandom() + } + } +} + +func DoDBTesting(t *DBTesting) { + if t.Rand == nil { + t.Rand = NewRand() + } + + t.DeleteRandom() + t.PutRandom() + t.DeleteRandom() + t.DeleteRandom() + for i := t.Deleted.Len() / 2; i >= 0; i-- { + t.PutRandom() + } + t.RandomAct((t.Deleted.Len() + t.Present.Len()) * 10) + + // Additional iterator testing + if db, ok := t.DB.(NewIterator); ok { + iter := db.TestNewIterator(nil) + Expect(iter.Error()).NotTo(HaveOccurred()) + + it := IteratorTesting{ + KeyValue: t.Present, + Iter: iter, + } + + DoIteratorTesting(&it) + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go new file mode 100644 index 000000000..60c7cf362 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go @@ -0,0 +1,327 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package testutil + +import ( + "fmt" + "math/rand" + + . "github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" +) + +type IterAct int + +func (a IterAct) String() string { + switch a { + case IterNone: + return "none" + case IterFirst: + return "first" + case IterLast: + return "last" + case IterPrev: + return "prev" + case IterNext: + return "next" + case IterSeek: + return "seek" + case IterSOI: + return "soi" + case IterEOI: + return "eoi" + } + return "unknown" +} + +const ( + IterNone IterAct = iota + IterFirst + IterLast + IterPrev + IterNext + IterSeek + IterSOI + IterEOI +) + +type IteratorTesting struct { + KeyValue + Iter iterator.Iterator + Rand *rand.Rand + PostFn func(t *IteratorTesting) + Pos int + Act, LastAct IterAct + + once bool +} + +func (t *IteratorTesting) init() { + if !t.once { + t.Pos = -1 + t.once = true + } +} + +func (t *IteratorTesting) post() { + if t.PostFn != nil { + t.PostFn(t) + } +} + +func (t *IteratorTesting) setAct(act IterAct) { + t.LastAct, t.Act = t.Act, act +} + +func (t *IteratorTesting) text() string { + return fmt.Sprintf("at pos %d and last action was <%v> -> <%v>", t.Pos, t.LastAct, t.Act) +} + +func (t *IteratorTesting) Text() string { + return "IteratorTesting is " + t.text() +} + +func (t *IteratorTesting) IsFirst() bool { + t.init() + return t.Len() > 0 && t.Pos == 0 +} + +func (t *IteratorTesting) IsLast() bool { + t.init() + return t.Len() > 0 && t.Pos == t.Len()-1 +} + +func (t *IteratorTesting) TestKV() { + t.init() + key, value := t.Index(t.Pos) + Expect(t.Iter.Key()).NotTo(BeNil()) + Expect(t.Iter.Key()).Should(Equal(key), "Key is invalid, %s", t.text()) + Expect(t.Iter.Value()).Should(Equal(value), "Value for key %q, %s", key, t.text()) +} + +func (t *IteratorTesting) First() { + t.init() + t.setAct(IterFirst) + + ok := t.Iter.First() + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + if t.Len() > 0 { + t.Pos = 0 + Expect(ok).Should(BeTrue(), t.Text()) + t.TestKV() + } else { + t.Pos = -1 + Expect(ok).ShouldNot(BeTrue(), t.Text()) + } + t.post() +} + +func (t *IteratorTesting) Last() { + t.init() + t.setAct(IterLast) + + ok := t.Iter.Last() + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + if t.Len() > 0 { + t.Pos = t.Len() - 1 + 
Expect(ok).Should(BeTrue(), t.Text()) + t.TestKV() + } else { + t.Pos = 0 + Expect(ok).ShouldNot(BeTrue(), t.Text()) + } + t.post() +} + +func (t *IteratorTesting) Next() { + t.init() + t.setAct(IterNext) + + ok := t.Iter.Next() + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + if t.Pos < t.Len()-1 { + t.Pos++ + Expect(ok).Should(BeTrue(), t.Text()) + t.TestKV() + } else { + t.Pos = t.Len() + Expect(ok).ShouldNot(BeTrue(), t.Text()) + } + t.post() +} + +func (t *IteratorTesting) Prev() { + t.init() + t.setAct(IterPrev) + + ok := t.Iter.Prev() + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + if t.Pos > 0 { + t.Pos-- + Expect(ok).Should(BeTrue(), t.Text()) + t.TestKV() + } else { + t.Pos = -1 + Expect(ok).ShouldNot(BeTrue(), t.Text()) + } + t.post() +} + +func (t *IteratorTesting) Seek(i int) { + t.init() + t.setAct(IterSeek) + + key, _ := t.Index(i) + oldKey, _ := t.IndexOrNil(t.Pos) + + ok := t.Iter.Seek(key) + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q, to pos %d, %s", oldKey, key, i, t.text())) + + t.Pos = i + t.TestKV() + t.post() +} + +func (t *IteratorTesting) SeekInexact(i int) { + t.init() + t.setAct(IterSeek) + var key0 []byte + key1, _ := t.Index(i) + if i > 0 { + key0, _ = t.Index(i - 1) + } + key := BytesSeparator(key0, key1) + oldKey, _ := t.IndexOrNil(t.Pos) + + ok := t.Iter.Seek(key) + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key1, i, t.text())) + + t.Pos = i + t.TestKV() + t.post() +} + +func (t *IteratorTesting) SeekKey(key []byte) { + t.init() + t.setAct(IterSeek) + oldKey, _ := t.IndexOrNil(t.Pos) + i := t.Search(key) + + ok := t.Iter.Seek(key) + Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) + if i < t.Len() { + key_, _ := t.Index(i) + Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key_, i, t.text())) + t.Pos = i + t.TestKV() + } else { + Expect(ok).ShouldNot(BeTrue(), fmt.Sprintf("Seek from key %q to %q, %s", oldKey, key, t.text())) + } + + t.Pos = i + t.post() +} + +func (t *IteratorTesting) SOI() { + t.init() + t.setAct(IterSOI) + Expect(t.Pos).Should(BeNumerically("<=", 0), t.Text()) + for i := 0; i < 3; i++ { + t.Prev() + } + t.post() +} + +func (t *IteratorTesting) EOI() { + t.init() + t.setAct(IterEOI) + Expect(t.Pos).Should(BeNumerically(">=", t.Len()-1), t.Text()) + for i := 0; i < 3; i++ { + t.Next() + } + t.post() +} + +func (t *IteratorTesting) WalkPrev(fn func(t *IteratorTesting)) { + t.init() + for old := t.Pos; t.Pos > 0; old = t.Pos { + fn(t) + Expect(t.Pos).Should(BeNumerically("<", old), t.Text()) + } +} + +func (t *IteratorTesting) WalkNext(fn func(t *IteratorTesting)) { + t.init() + for old := t.Pos; t.Pos < t.Len()-1; old = t.Pos { + fn(t) + Expect(t.Pos).Should(BeNumerically(">", old), t.Text()) + } +} + +func (t *IteratorTesting) PrevAll() { + t.WalkPrev(func(t *IteratorTesting) { + t.Prev() + }) +} + +func (t *IteratorTesting) NextAll() { + t.WalkNext(func(t *IteratorTesting) { + t.Next() + }) +} + +func DoIteratorTesting(t *IteratorTesting) { + if t.Rand == nil { + t.Rand = NewRand() + } + t.SOI() + t.NextAll() + t.First() + t.SOI() + t.NextAll() + t.EOI() + t.PrevAll() + t.Last() + t.EOI() + t.PrevAll() + t.SOI() + + t.NextAll() + t.PrevAll() + t.NextAll() + t.Last() + t.PrevAll() + t.First() + t.NextAll() + t.EOI() + + ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { + t.Seek(i) + }) + + 
ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { + t.SeekInexact(i) + }) + + ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { + t.Seek(i) + if i%2 != 0 { + t.PrevAll() + t.SOI() + } else { + t.NextAll() + t.EOI() + } + }) + + for _, key := range []string{"", "foo", "bar", "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"} { + t.SeekKey([]byte(key)) + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go new file mode 100644 index 000000000..56ef2a455 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go @@ -0,0 +1,352 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package testutil + +import ( + "fmt" + "math/rand" + "sort" + "strings" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +type KeyValueEntry struct { + key, value []byte +} + +type KeyValue struct { + entries []KeyValueEntry + nbytes int +} + +func (kv *KeyValue) Put(key, value []byte) { + if n := len(kv.entries); n > 0 && cmp.Compare(kv.entries[n-1].key, key) >= 0 { + panic(fmt.Sprintf("Put: keys are not in increasing order: %q, %q", kv.entries[n-1].key, key)) + } + kv.entries = append(kv.entries, KeyValueEntry{key, value}) + kv.nbytes += len(key) + len(value) +} + +func (kv *KeyValue) PutString(key, value string) { + kv.Put([]byte(key), []byte(value)) +} + +func (kv *KeyValue) PutU(key, value []byte) bool { + if i, exist := kv.Get(key); !exist { + if i < kv.Len() { + kv.entries = append(kv.entries[:i+1], kv.entries[i:]...) + kv.entries[i] = KeyValueEntry{key, value} + } else { + kv.entries = append(kv.entries, KeyValueEntry{key, value}) + } + kv.nbytes += len(key) + len(value) + return true + } else { + kv.nbytes += len(value) - len(kv.ValueAt(i)) + kv.entries[i].value = value + } + return false +} + +func (kv *KeyValue) PutUString(key, value string) bool { + return kv.PutU([]byte(key), []byte(value)) +} + +func (kv *KeyValue) Delete(key []byte) (exist bool, value []byte) { + i, exist := kv.Get(key) + if exist { + value = kv.entries[i].value + kv.DeleteIndex(i) + } + return +} + +func (kv *KeyValue) DeleteIndex(i int) bool { + if i < kv.Len() { + kv.nbytes -= len(kv.KeyAt(i)) + len(kv.ValueAt(i)) + kv.entries = append(kv.entries[:i], kv.entries[i+1:]...) 
+ return true + } + return false +} + +func (kv KeyValue) Len() int { + return len(kv.entries) +} + +func (kv *KeyValue) Size() int { + return kv.nbytes +} + +func (kv KeyValue) KeyAt(i int) []byte { + return kv.entries[i].key +} + +func (kv KeyValue) ValueAt(i int) []byte { + return kv.entries[i].value +} + +func (kv KeyValue) Index(i int) (key, value []byte) { + if i < 0 || i >= len(kv.entries) { + panic(fmt.Sprintf("Index #%d: out of range", i)) + } + return kv.entries[i].key, kv.entries[i].value +} + +func (kv KeyValue) IndexInexact(i int) (key_, key, value []byte) { + key, value = kv.Index(i) + var key0 []byte + var key1 = kv.KeyAt(i) + if i > 0 { + key0 = kv.KeyAt(i - 1) + } + key_ = BytesSeparator(key0, key1) + return +} + +func (kv KeyValue) IndexOrNil(i int) (key, value []byte) { + if i >= 0 && i < len(kv.entries) { + return kv.entries[i].key, kv.entries[i].value + } + return nil, nil +} + +func (kv KeyValue) IndexString(i int) (key, value string) { + key_, _value := kv.Index(i) + return string(key_), string(_value) +} + +func (kv KeyValue) Search(key []byte) int { + return sort.Search(kv.Len(), func(i int) bool { + return cmp.Compare(kv.KeyAt(i), key) >= 0 + }) +} + +func (kv KeyValue) SearchString(key string) int { + return kv.Search([]byte(key)) +} + +func (kv KeyValue) Get(key []byte) (i int, exist bool) { + i = kv.Search(key) + if i < kv.Len() && cmp.Compare(kv.KeyAt(i), key) == 0 { + exist = true + } + return +} + +func (kv KeyValue) GetString(key string) (i int, exist bool) { + return kv.Get([]byte(key)) +} + +func (kv KeyValue) Iterate(fn func(i int, key, value []byte)) { + for i, x := range kv.entries { + fn(i, x.key, x.value) + } +} + +func (kv KeyValue) IterateString(fn func(i int, key, value string)) { + kv.Iterate(func(i int, key, value []byte) { + fn(i, string(key), string(value)) + }) +} + +func (kv KeyValue) IterateShuffled(rnd *rand.Rand, fn func(i int, key, value []byte)) { + ShuffledIndex(rnd, kv.Len(), 1, func(i int) { + fn(i, kv.entries[i].key, kv.entries[i].value) + }) +} + +func (kv KeyValue) IterateShuffledString(rnd *rand.Rand, fn func(i int, key, value string)) { + kv.IterateShuffled(rnd, func(i int, key, value []byte) { + fn(i, string(key), string(value)) + }) +} + +func (kv KeyValue) IterateInexact(fn func(i int, key_, key, value []byte)) { + for i := range kv.entries { + key_, key, value := kv.IndexInexact(i) + fn(i, key_, key, value) + } +} + +func (kv KeyValue) IterateInexactString(fn func(i int, key_, key, value string)) { + kv.IterateInexact(func(i int, key_, key, value []byte) { + fn(i, string(key_), string(key), string(value)) + }) +} + +func (kv KeyValue) Clone() KeyValue { + return KeyValue{append([]KeyValueEntry{}, kv.entries...), kv.nbytes} +} + +func (kv KeyValue) Slice(start, limit int) KeyValue { + if start < 0 || limit > kv.Len() { + panic(fmt.Sprintf("Slice %d .. %d: out of range", start, limit)) + } else if limit < start { + panic(fmt.Sprintf("Slice %d .. 
%d: invalid range", start, limit)) + } + return KeyValue{append([]KeyValueEntry{}, kv.entries[start:limit]...), kv.nbytes} +} + +func (kv KeyValue) SliceKey(start, limit []byte) KeyValue { + start_ := 0 + limit_ := kv.Len() + if start != nil { + start_ = kv.Search(start) + } + if limit != nil { + limit_ = kv.Search(limit) + } + return kv.Slice(start_, limit_) +} + +func (kv KeyValue) SliceKeyString(start, limit string) KeyValue { + return kv.SliceKey([]byte(start), []byte(limit)) +} + +func (kv KeyValue) SliceRange(r *util.Range) KeyValue { + if r != nil { + return kv.SliceKey(r.Start, r.Limit) + } + return kv.Clone() +} + +func (kv KeyValue) Range(start, limit int) (r util.Range) { + if kv.Len() > 0 { + if start == kv.Len() { + r.Start = BytesAfter(kv.KeyAt(start - 1)) + } else { + r.Start = kv.KeyAt(start) + } + } + if limit < kv.Len() { + r.Limit = kv.KeyAt(limit) + } + return +} + +func KeyValue_EmptyKey() *KeyValue { + kv := &KeyValue{} + kv.PutString("", "v") + return kv +} + +func KeyValue_EmptyValue() *KeyValue { + kv := &KeyValue{} + kv.PutString("abc", "") + kv.PutString("abcd", "") + return kv +} + +func KeyValue_OneKeyValue() *KeyValue { + kv := &KeyValue{} + kv.PutString("abc", "v") + return kv +} + +func KeyValue_BigValue() *KeyValue { + kv := &KeyValue{} + kv.PutString("big1", strings.Repeat("1", 200000)) + return kv +} + +func KeyValue_SpecialKey() *KeyValue { + kv := &KeyValue{} + kv.PutString("\xff\xff", "v3") + return kv +} + +func KeyValue_MultipleKeyValue() *KeyValue { + kv := &KeyValue{} + kv.PutString("a", "v") + kv.PutString("aa", "v1") + kv.PutString("aaa", "v2") + kv.PutString("aaacccccccccc", "v2") + kv.PutString("aaaccccccccccd", "v3") + kv.PutString("aaaccccccccccf", "v4") + kv.PutString("aaaccccccccccfg", "v5") + kv.PutString("ab", "v6") + kv.PutString("abc", "v7") + kv.PutString("abcd", "v8") + kv.PutString("accccccccccccccc", "v9") + kv.PutString("b", "v10") + kv.PutString("bb", "v11") + kv.PutString("bc", "v12") + kv.PutString("c", "v13") + kv.PutString("c1", "v13") + kv.PutString("czzzzzzzzzzzzzz", "v14") + kv.PutString("fffffffffffffff", "v15") + kv.PutString("g11", "v15") + kv.PutString("g111", "v15") + kv.PutString("g111\xff", "v15") + kv.PutString("zz", "v16") + kv.PutString("zzzzzzz", "v16") + kv.PutString("zzzzzzzzzzzzzzzz", "v16") + return kv +} + +var keymap = []byte("012345678ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxy") + +func KeyValue_Generate(rnd *rand.Rand, n, minlen, maxlen, vminlen, vmaxlen int) *KeyValue { + if rnd == nil { + rnd = NewRand() + } + if maxlen < minlen { + panic("max len should >= min len") + } + + rrand := func(min, max int) int { + if min == max { + return max + } + return rnd.Intn(max-min) + min + } + + kv := &KeyValue{} + endC := byte(len(keymap) - 1) + gen := make([]byte, 0, maxlen) + for i := 0; i < n; i++ { + m := rrand(minlen, maxlen) + last := gen + retry: + gen = last[:m] + if k := len(last); m > k { + for j := k; j < m; j++ { + gen[j] = 0 + } + } else { + for j := m - 1; j >= 0; j-- { + c := last[j] + if c == endC { + continue + } + gen[j] = c + 1 + for j += 1; j < m; j++ { + gen[j] = 0 + } + goto ok + } + if m < maxlen { + m++ + goto retry + } + panic(fmt.Sprintf("only able to generate %d keys out of %d keys, try increasing max len", kv.Len(), n)) + ok: + } + key := make([]byte, m) + for j := 0; j < m; j++ { + key[j] = keymap[gen[j]] + } + value := make([]byte, rrand(vminlen, vmaxlen)) + for n := copy(value, []byte(fmt.Sprintf("v%d", i))); n < len(value); n++ { + value[n] = 'x' + } + kv.Put(key, value) + } 
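+	// gen is advanced lexicographically on every iteration and keymap is in
+	// ascending byte order, so the generated keys come out strictly increasing
+	// and never trip Put's ordering check.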
+ return kv +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go new file mode 100644 index 000000000..77547aaa2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go @@ -0,0 +1,136 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package testutil + +import ( + "fmt" + "math/rand" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +func KeyValueTesting(rnd *rand.Rand, p DB, kv KeyValue) { + if rnd == nil { + rnd = NewRand() + } + + if db, ok := p.(Find); ok { + It("Should find all keys with Find", func() { + ShuffledIndex(nil, kv.Len(), 1, func(i int) { + key_, key, value := kv.IndexInexact(i) + + // Using exact key. + rkey, rvalue, err := db.TestFind(key) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) + Expect(rkey).Should(Equal(key), "Key") + Expect(rvalue).Should(Equal(value), "Value for key %q", key) + + // Using inexact key. + rkey, rvalue, err = db.TestFind(key_) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key) + Expect(rkey).Should(Equal(key)) + Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key) + }) + }) + + It("Should return error if the key is not present", func() { + var key []byte + if kv.Len() > 0 { + key_, _ := kv.Index(kv.Len() - 1) + key = BytesAfter(key_) + } + rkey, _, err := db.TestFind(key) + Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey) + Expect(err).Should(Equal(util.ErrNotFound)) + }) + } + + if db, ok := p.(Get); ok { + It("Should only find exact key with Get", func() { + ShuffledIndex(nil, kv.Len(), 1, func(i int) { + key_, key, value := kv.IndexInexact(i) + + // Using exact key. + rvalue, err := db.TestGet(key) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) + Expect(rvalue).Should(Equal(value), "Value for key %q", key) + + // Using inexact key. + if len(key_) > 0 { + _, err = db.TestGet(key_) + Expect(err).Should(HaveOccurred(), "Error for key %q", key_) + Expect(err).Should(Equal(util.ErrNotFound)) + } + }) + }) + } + + if db, ok := p.(NewIterator); ok { + TestIter := func(r *util.Range, _kv KeyValue) { + iter := db.TestNewIterator(r) + Expect(iter.Error()).ShouldNot(HaveOccurred()) + + t := IteratorTesting{ + KeyValue: _kv, + Iter: iter, + } + + DoIteratorTesting(&t) + } + + It("Should iterates and seeks correctly", func(done Done) { + TestIter(nil, kv.Clone()) + done <- true + }, 3.0) + + RandomIndex(rnd, kv.Len(), kv.Len(), func(i int) { + type slice struct { + r *util.Range + start, limit int + } + + key_, _, _ := kv.IndexInexact(i) + for _, x := range []slice{ + {&util.Range{Start: key_, Limit: nil}, i, kv.Len()}, + {&util.Range{Start: nil, Limit: key_}, 0, i}, + } { + It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) { + TestIter(x.r, kv.Slice(x.start, x.limit)) + done <- true + }, 3.0) + } + }) + + RandomRange(rnd, kv.Len(), kv.Len(), func(start, limit int) { + It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. 
%d", start, limit), func(done Done) { + r := kv.Range(start, limit) + TestIter(&r, kv.Slice(start, limit)) + done <- true + }, 3.0) + }) + } +} + +func AllKeyValueTesting(rnd *rand.Rand, body func(kv KeyValue) DB) { + Test := func(kv *KeyValue) func() { + return func() { + db := body(*kv) + KeyValueTesting(rnd, db, *kv) + } + } + + Describe("with no key/value (empty)", Test(&KeyValue{})) + Describe("with empty key", Test(KeyValue_EmptyKey())) + Describe("with empty value", Test(KeyValue_EmptyValue())) + Describe("with one key/value", Test(KeyValue_OneKeyValue())) + Describe("with big value", Test(KeyValue_BigValue())) + Describe("with special key", Test(KeyValue_SpecialKey())) + Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue())) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go new file mode 100644 index 000000000..4c8c3f0a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go @@ -0,0 +1,585 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package testutil + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + + . "github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + storageMu sync.Mutex + storageUseFS bool = true + storageKeepFS bool = false + storageNum int +) + +type StorageMode int + +const ( + ModeOpen StorageMode = 1 << iota + ModeCreate + ModeRemove + ModeRead + ModeWrite + ModeSync + ModeClose +) + +const ( + modeOpen = iota + modeCreate + modeRemove + modeRead + modeWrite + modeSync + modeClose + + modeCount +) + +const ( + typeManifest = iota + typeJournal + typeTable + typeTemp + + typeCount +) + +const flattenCount = modeCount * typeCount + +func flattenType(m StorageMode, t storage.FileType) int { + var x int + switch m { + case ModeOpen: + x = modeOpen + case ModeCreate: + x = modeCreate + case ModeRemove: + x = modeRemove + case ModeRead: + x = modeRead + case ModeWrite: + x = modeWrite + case ModeSync: + x = modeSync + case ModeClose: + x = modeClose + default: + panic("invalid storage mode") + } + x *= typeCount + switch t { + case storage.TypeManifest: + return x + typeManifest + case storage.TypeJournal: + return x + typeJournal + case storage.TypeTable: + return x + typeTable + case storage.TypeTemp: + return x + typeTemp + default: + panic("invalid file type") + } +} + +func listFlattenType(m StorageMode, t storage.FileType) []int { + ret := make([]int, 0, flattenCount) + add := func(x int) { + x *= typeCount + switch { + case t&storage.TypeManifest != 0: + ret = append(ret, x+typeManifest) + case t&storage.TypeJournal != 0: + ret = append(ret, x+typeJournal) + case t&storage.TypeTable != 0: + ret = append(ret, x+typeTable) + case t&storage.TypeTemp != 0: + ret = append(ret, x+typeTemp) + } + } + switch { + case m&ModeOpen != 0: + add(modeOpen) + case m&ModeCreate != 0: + add(modeCreate) + case m&ModeRemove != 0: + add(modeRemove) + case m&ModeRead != 0: + add(modeRead) + case m&ModeWrite != 0: + add(modeWrite) + case m&ModeSync != 0: + add(modeSync) + case m&ModeClose != 0: + add(modeClose) + } + return ret +} + +func packFile(num 
uint64, t storage.FileType) uint64 { + if num>>(64-typeCount) != 0 { + panic("overflow") + } + return num<<typeCount | uint64(t) +} + +func unpackFile(x uint64) (uint64, storage.FileType) { + return x >> typeCount, storage.FileType(x) & storage.TypeAll +} + +type emulatedError struct { + err error +} + +func (err emulatedError) Error() string { + return fmt.Sprintf("emulated storage error: %v", err.err) +} + +type storageLock struct { + s *Storage + r util.Releaser +} + +func (l storageLock) Release() { + l.r.Release() + l.s.logI("storage lock released") +} + +type reader struct { + f *file + storage.Reader +} + +func (r *reader) Read(p []byte) (n int, err error) { + err = r.f.s.emulateError(ModeRead, r.f.Type()) + if err == nil { + r.f.s.stall(ModeRead, r.f.Type()) + n, err = r.Reader.Read(p) + } + r.f.s.count(ModeRead, r.f.Type(), n) + if err != nil && err != io.EOF { + r.f.s.logI("read error, num=%d type=%v n=%d err=%v", r.f.Num(), r.f.Type(), n, err) + } + return +} + +func (r *reader) ReadAt(p []byte, off int64) (n int, err error) { + err = r.f.s.emulateError(ModeRead, r.f.Type()) + if err == nil { + r.f.s.stall(ModeRead, r.f.Type()) + n, err = r.Reader.ReadAt(p, off) + } + r.f.s.count(ModeRead, r.f.Type(), n) + if err != nil && err != io.EOF { + r.f.s.logI("readAt error, num=%d type=%v offset=%d n=%d err=%v", r.f.Num(), r.f.Type(), off, n, err) + } + return +} + +func (r *reader) Close() (err error) { + return r.f.doClose(r.Reader) +} + +type writer struct { + f *file + storage.Writer +} + +func (w *writer) Write(p []byte) (n int, err error) { + err = w.f.s.emulateError(ModeWrite, w.f.Type()) + if err == nil { + w.f.s.stall(ModeWrite, w.f.Type()) + n, err = w.Writer.Write(p) + } + w.f.s.count(ModeWrite, w.f.Type(), n) + if err != nil && err != io.EOF { + w.f.s.logI("write error, num=%d type=%v n=%d err=%v", w.f.Num(), w.f.Type(), n, err) + } + return +} + +func (w *writer) Sync() (err error) { + err = w.f.s.emulateError(ModeSync, w.f.Type()) + if err == nil { + w.f.s.stall(ModeSync, w.f.Type()) + err = w.Writer.Sync() + } + w.f.s.count(ModeSync, w.f.Type(), 0) + if err != nil { + w.f.s.logI("sync error, num=%d type=%v err=%v", w.f.Num(), w.f.Type(), err) + } + return +} + +func (w *writer) Close() (err error) { + return w.f.doClose(w.Writer) +} + +type file struct { + s *Storage + storage.File +} + +func (f *file) pack() uint64 { + return packFile(f.Num(), f.Type()) +} + +func (f *file) assertOpen() { + ExpectWithOffset(2, f.s.opens).NotTo(HaveKey(f.pack()), "File open, num=%d type=%v writer=%v", f.Num(), f.Type(), f.s.opens[f.pack()]) +} + +func (f *file) doClose(closer io.Closer) (err error) { + err = f.s.emulateError(ModeClose, f.Type()) + if err == nil { + f.s.stall(ModeClose, f.Type()) + } + f.s.mu.Lock() + defer f.s.mu.Unlock() + if err == nil { + ExpectWithOffset(2, f.s.opens).To(HaveKey(f.pack()), "File closed, num=%d type=%v", f.Num(), f.Type()) + err = closer.Close() + } + f.s.countNB(ModeClose, f.Type(), 0) + writer := f.s.opens[f.pack()] + if err != nil { + f.s.logISkip(1, "file close failed, num=%d type=%v writer=%v err=%v", f.Num(), f.Type(), writer, err) + } else { + f.s.logISkip(1, "file closed, num=%d type=%v writer=%v", f.Num(), f.Type(), writer) + delete(f.s.opens, f.pack()) + } + return +} + +func (f *file) Open() (r storage.Reader, err error) { + err = f.s.emulateError(ModeOpen, f.Type()) + if err == nil { + f.s.stall(ModeOpen, f.Type()) + } + f.s.mu.Lock() + defer f.s.mu.Unlock() + if err == nil { + f.assertOpen() + f.s.countNB(ModeOpen, f.Type(), 0) + r, err = f.File.Open() + } + if err != nil { + f.s.logI("file open failed, num=%d type=%v err=%v",
f.Num(), f.Type(), err) + } else { + f.s.logI("file opened, num=%d type=%v", f.Num(), f.Type()) + f.s.opens[f.pack()] = false + r = &reader{f, r} + } + return +} + +func (f *file) Create() (w storage.Writer, err error) { + err = f.s.emulateError(ModeCreate, f.Type()) + if err == nil { + f.s.stall(ModeCreate, f.Type()) + } + f.s.mu.Lock() + defer f.s.mu.Unlock() + if err == nil { + f.assertOpen() + f.s.countNB(ModeCreate, f.Type(), 0) + w, err = f.File.Create() + } + if err != nil { + f.s.logI("file create failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) + } else { + f.s.logI("file created, num=%d type=%v", f.Num(), f.Type()) + f.s.opens[f.pack()] = true + w = &writer{f, w} + } + return +} + +func (f *file) Remove() (err error) { + err = f.s.emulateError(ModeRemove, f.Type()) + if err == nil { + f.s.stall(ModeRemove, f.Type()) + } + f.s.mu.Lock() + defer f.s.mu.Unlock() + if err == nil { + f.assertOpen() + f.s.countNB(ModeRemove, f.Type(), 0) + err = f.File.Remove() + } + if err != nil { + f.s.logI("file remove failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) + } else { + f.s.logI("file removed, num=%d type=%v", f.Num(), f.Type()) + } + return +} + +type Storage struct { + storage.Storage + closeFn func() error + + lmu sync.Mutex + lb bytes.Buffer + + mu sync.Mutex + // Open files, true=writer, false=reader + opens map[uint64]bool + counters [flattenCount]int + bytesCounter [flattenCount]int64 + emulatedError [flattenCount]error + stallCond sync.Cond + stalled [flattenCount]bool +} + +func (s *Storage) log(skip int, str string) { + s.lmu.Lock() + defer s.lmu.Unlock() + _, file, line, ok := runtime.Caller(skip + 2) + if ok { + // Truncate file name at last file name separator. + if index := strings.LastIndex(file, "/"); index >= 0 { + file = file[index+1:] + } else if index = strings.LastIndex(file, "\\"); index >= 0 { + file = file[index+1:] + } + } else { + file = "???" + line = 1 + } + fmt.Fprintf(&s.lb, "%s:%d: ", file, line) + lines := strings.Split(str, "\n") + if l := len(lines); l > 1 && lines[l-1] == "" { + lines = lines[:l-1] + } + for i, line := range lines { + if i > 0 { + s.lb.WriteString("\n\t") + } + s.lb.WriteString(line) + } + s.lb.WriteByte('\n') +} + +func (s *Storage) logISkip(skip int, format string, args ...interface{}) { + pc, _, _, ok := runtime.Caller(skip + 1) + if ok { + if f := runtime.FuncForPC(pc); f != nil { + fname := f.Name() + if index := strings.LastIndex(fname, "."); index >= 0 { + fname = fname[index+1:] + } + format = fname + ": " + format + } + } + s.log(skip+1, fmt.Sprintf(format, args...)) +} + +func (s *Storage) logI(format string, args ...interface{}) { + s.logISkip(1, format, args...) 
+} + +func (s *Storage) Log(str string) { + s.log(1, "Log: "+str) +} + +func (s *Storage) Lock() (r util.Releaser, err error) { + r, err = s.Storage.Lock() + if err != nil { + s.logI("storage locking failed, err=%v", err) + } else { + s.logI("storage locked") + r = storageLock{s, r} + } + return +} + +func (s *Storage) GetFile(num uint64, t storage.FileType) storage.File { + return &file{s, s.Storage.GetFile(num, t)} +} + +func (s *Storage) GetFiles(t storage.FileType) (files []storage.File, err error) { + rfiles, err := s.Storage.GetFiles(t) + if err != nil { + s.logI("get files failed, err=%v", err) + return + } + files = make([]storage.File, len(rfiles)) + for i, f := range rfiles { + files[i] = &file{s, f} + } + s.logI("get files, type=0x%x count=%d", int(t), len(files)) + return +} + +func (s *Storage) GetManifest() (f storage.File, err error) { + manifest, err := s.Storage.GetManifest() + if err != nil { + if !os.IsNotExist(err) { + s.logI("get manifest failed, err=%v", err) + } + return + } + s.logI("get manifest, num=%d", manifest.Num()) + return &file{s, manifest}, nil +} + +func (s *Storage) SetManifest(f storage.File) error { + f_, ok := f.(*file) + ExpectWithOffset(1, ok).To(BeTrue()) + ExpectWithOffset(1, f_.Type()).To(Equal(storage.TypeManifest)) + err := s.Storage.SetManifest(f_.File) + if err != nil { + s.logI("set manifest failed, err=%v", err) + } else { + s.logI("set manifest, num=%d", f_.Num()) + } + return err +} + +func (s *Storage) openFiles() string { + out := "Open files:" + for x, writer := range s.opens { + num, t := unpackFile(x) + out += fmt.Sprintf("\n · num=%d type=%v writer=%v", num, t, writer) + } + return out +} + +func (s *Storage) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles()) + err := s.Storage.Close() + if err != nil { + s.logI("storage closing failed, err=%v", err) + } else { + s.logI("storage closed") + } + if s.closeFn != nil { + if err1 := s.closeFn(); err1 != nil { + s.logI("close func error, err=%v", err1) + } + } + return err +} + +func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) { + s.counters[flattenType(m, t)]++ + s.bytesCounter[flattenType(m, t)] += int64(n) +} + +func (s *Storage) count(m StorageMode, t storage.FileType, n int) { + s.mu.Lock() + defer s.mu.Unlock() + s.countNB(m, t, n) +} + +func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) { + for _, x := range listFlattenType(m, t) { + s.counters[x] = 0 + s.bytesCounter[x] = 0 + } +} + +func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) { + for _, x := range listFlattenType(m, t) { + count += s.counters[x] + bytes += s.bytesCounter[x] + } + return +} + +func (s *Storage) emulateError(m StorageMode, t storage.FileType) error { + s.mu.Lock() + defer s.mu.Unlock() + err := s.emulatedError[flattenType(m, t)] + if err != nil { + return emulatedError{err} + } + return nil +} + +func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) { + s.mu.Lock() + defer s.mu.Unlock() + for _, x := range listFlattenType(m, t) { + s.emulatedError[x] = err + } +} + +func (s *Storage) stall(m StorageMode, t storage.FileType) { + x := flattenType(m, t) + s.mu.Lock() + defer s.mu.Unlock() + for s.stalled[x] { + s.stallCond.Wait() + } +} + +func (s *Storage) Stall(m StorageMode, t storage.FileType) { + s.mu.Lock() + defer s.mu.Unlock() + for _, x := range listFlattenType(m, t) { + s.stalled[x] = true + } +} + +func (s *Storage) Release(m StorageMode, t 
storage.FileType) { + s.mu.Lock() + defer s.mu.Unlock() + for _, x := range listFlattenType(m, t) { + s.stalled[x] = false + } + s.stallCond.Broadcast() +} + +func NewStorage() *Storage { + var stor storage.Storage + var closeFn func() error + if storageUseFS { + for { + storageMu.Lock() + num := storageNum + storageNum++ + storageMu.Unlock() + path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) + if _, err := os.Stat(path); os.IsNotExist(err) { + stor, err = storage.OpenFile(path) + ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path) + closeFn = func() error { + if storageKeepFS { + return nil + } + return os.RemoveAll(path) + } + break + } + } + } else { + stor = storage.NewMemStorage() + } + s := &Storage{ + Storage: stor, + closeFn: closeFn, + opens: make(map[uint64]bool), + } + s.stallCond.L = &s.mu + return s +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go new file mode 100644 index 000000000..516e4e50d --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go @@ -0,0 +1,157 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package testutil + +import ( + "bytes" + "flag" + "math/rand" + "reflect" + "sync" + + "github.com/onsi/ginkgo/config" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" +) + +var ( + runfn = make(map[string][]func()) + runmu sync.Mutex +) + +func Defer(args ...interface{}) bool { + var ( + group string + fn func() + ) + for _, arg := range args { + v := reflect.ValueOf(arg) + switch v.Kind() { + case reflect.String: + group = v.String() + case reflect.Func: + r := reflect.ValueOf(&fn).Elem() + r.Set(v) + } + } + if fn != nil { + runmu.Lock() + runfn[group] = append(runfn[group], fn) + runmu.Unlock() + } + return true +} + +func RunDefer(groups ...string) bool { + if len(groups) == 0 { + groups = append(groups, "") + } + runmu.Lock() + var runfn_ []func() + for _, group := range groups { + runfn_ = append(runfn_, runfn[group]...) + delete(runfn, group) + } + runmu.Unlock() + for _, fn := range runfn_ { + fn() + } + return runfn_ != nil +} + +func RandomSeed() int64 { + if !flag.Parsed() { + panic("random seed not initialized") + } + return config.GinkgoConfig.RandomSeed +} + +func NewRand() *rand.Rand { + return rand.New(rand.NewSource(RandomSeed())) +} + +var cmp = comparer.DefaultComparer + +func BytesSeparator(a, b []byte) []byte { + if bytes.Equal(a, b) { + return b + } + i, n := 0, len(a) + if n > len(b) { + n = len(b) + } + for ; i < n && (a[i] == b[i]); i++ { + } + x := append([]byte{}, a[:i]...) 
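+	// x now holds the common prefix of a and b; the code below extends it into a
+	// short key intended to sort after a but before b, which the tests use as an
+	// "inexact" probe between two neighbouring entries.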
+ if i < n { + if c := a[i] + 1; c < b[i] { + return append(x, c) + } + x = append(x, a[i]) + i++ + } + for ; i < len(a); i++ { + if c := a[i]; c < 0xff { + return append(x, c+1) + } else { + x = append(x, c) + } + } + if len(b) > i && b[i] > 0 { + return append(x, b[i]-1) + } + return append(x, 'x') +} + +func BytesAfter(b []byte) []byte { + var x []byte + for _, c := range b { + if c < 0xff { + return append(x, c+1) + } else { + x = append(x, c) + } + } + return append(x, 'x') +} + +func RandomIndex(rnd *rand.Rand, n, round int, fn func(i int)) { + if rnd == nil { + rnd = NewRand() + } + for x := 0; x < round; x++ { + fn(rnd.Intn(n)) + } + return +} + +func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) { + if rnd == nil { + rnd = NewRand() + } + for x := 0; x < round; x++ { + for _, i := range rnd.Perm(n) { + fn(i) + } + } + return +} + +func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) { + if rnd == nil { + rnd = NewRand() + } + for x := 0; x < round; x++ { + start := rnd.Intn(n) + length := 0 + if j := n - start; j > 0 { + length = rnd.Intn(j) + } + fn(start, start+length) + } + return +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go new file mode 100644 index 000000000..1678e9e6e --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go @@ -0,0 +1,59 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + . "github.com/onsi/gomega" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +type testingDB struct { + *DB + ro *opt.ReadOptions + wo *opt.WriteOptions + stor *testutil.Storage +} + +func (t *testingDB) TestPut(key []byte, value []byte) error { + return t.Put(key, value, t.wo) +} + +func (t *testingDB) TestDelete(key []byte) error { + return t.Delete(key, t.wo) +} + +func (t *testingDB) TestGet(key []byte) (value []byte, err error) { + return t.Get(key, t.ro) +} + +func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator { + return t.NewIterator(slice, t.ro) +} + +func (t *testingDB) TestClose() { + err := t.Close() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + err = t.stor.Close() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) +} + +func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB { + stor := testutil.NewStorage() + db, err := Open(stor, o) + // FIXME: This may be called from outside It, which may cause panic. + Expect(err).NotTo(HaveOccurred()) + return &testingDB{ + DB: db, + ro: ro, + wo: wo, + stor: stor, + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go new file mode 100644 index 000000000..95bfc4320 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go @@ -0,0 +1,91 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "sort" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" +) + +func shorten(str string) string { + if len(str) <= 4 { + return str + } + return str[:1] + ".." + str[len(str)-1:] +} + +var bunits = [...]string{"", "Ki", "Mi", "Gi"} + +func shortenb(bytes int) string { + i := 0 + for ; bytes > 1024 && i < 4; i++ { + bytes /= 1024 + } + return fmt.Sprintf("%d%sB", bytes, bunits[i]) +} + +func sshortenb(bytes int) string { + if bytes == 0 { + return "~" + } + sign := "+" + if bytes < 0 { + sign = "-" + bytes *= -1 + } + i := 0 + for ; bytes > 1024 && i < 4; i++ { + bytes /= 1024 + } + return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i]) +} + +func sint(x int) string { + if x == 0 { + return "~" + } + sign := "+" + if x < 0 { + sign = "-" + x *= -1 + } + return fmt.Sprintf("%s%d", sign, x) +} + +func minInt(a, b int) int { + if a < b { + return a + } + return b +} + +func maxInt(a, b int) int { + if a > b { + return a + } + return b +} + +type files []storage.File + +func (p files) Len() int { + return len(p) +} + +func (p files) Less(i, j int) bool { + return p[i].Num() < p[j].Num() +} + +func (p files) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +func (p files) sort() { + sort.Sort(p) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go new file mode 100644 index 000000000..21de24255 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go @@ -0,0 +1,293 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package util + +// This a copy of Go std bytes.Buffer with some modification +// and some features stripped. + +import ( + "bytes" + "io" +) + +// A Buffer is a variable-sized buffer of bytes with Read and Write methods. +// The zero value for Buffer is an empty buffer ready to use. +type Buffer struct { + buf []byte // contents are the bytes buf[off : len(buf)] + off int // read at &buf[off], write at &buf[len(buf)] + bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. +} + +// Bytes returns a slice of the contents of the unread portion of the buffer; +// len(b.Bytes()) == b.Len(). If the caller changes the contents of the +// returned slice, the contents of the buffer will change provided there +// are no intervening method calls on the Buffer. +func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } + +// String returns the contents of the unread portion of the buffer +// as a string. If the Buffer is a nil pointer, it returns "". +func (b *Buffer) String() string { + if b == nil { + // Special case, useful in debugging. + return "" + } + return string(b.buf[b.off:]) +} + +// Len returns the number of bytes of the unread portion of the buffer; +// b.Len() == len(b.Bytes()). +func (b *Buffer) Len() int { return len(b.buf) - b.off } + +// Truncate discards all but the first n unread bytes from the buffer. +// It panics if n is negative or greater than the length of the buffer. +func (b *Buffer) Truncate(n int) { + switch { + case n < 0 || n > b.Len(): + panic("leveldb/util.Buffer: truncation out of range") + case n == 0: + // Reuse buffer space. 
+ b.off = 0 + } + b.buf = b.buf[0 : b.off+n] +} + +// Reset resets the buffer so it has no content. +// b.Reset() is the same as b.Truncate(0). +func (b *Buffer) Reset() { b.Truncate(0) } + +// grow grows the buffer to guarantee space for n more bytes. +// It returns the index where bytes should be written. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) grow(n int) int { + m := b.Len() + // If buffer is empty, reset to recover space. + if m == 0 && b.off != 0 { + b.Truncate(0) + } + if len(b.buf)+n > cap(b.buf) { + var buf []byte + if b.buf == nil && n <= len(b.bootstrap) { + buf = b.bootstrap[0:] + } else if m+n <= cap(b.buf)/2 { + // We can slide things down instead of allocating a new + // slice. We only need m+n <= cap(b.buf) to slide, but + // we instead let capacity get twice as large so we + // don't spend all our time copying. + copy(b.buf[:], b.buf[b.off:]) + buf = b.buf[:m] + } else { + // not enough space anywhere + buf = makeSlice(2*cap(b.buf) + n) + copy(buf, b.buf[b.off:]) + } + b.buf = buf + b.off = 0 + } + b.buf = b.buf[0 : b.off+m+n] + return b.off + m +} + +// Alloc allocs n bytes of slice from the buffer, growing the buffer as +// needed. If n is negative, Alloc will panic. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) Alloc(n int) []byte { + if n < 0 { + panic("leveldb/util.Buffer.Alloc: negative count") + } + m := b.grow(n) + return b.buf[m:] +} + +// Grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After Grow(n), at least n bytes can be written to the +// buffer without another allocation. +// If n is negative, Grow will panic. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) Grow(n int) { + if n < 0 { + panic("leveldb/util.Buffer.Grow: negative count") + } + m := b.grow(n) + b.buf = b.buf[0:m] +} + +// Write appends the contents of p to the buffer, growing the buffer as +// needed. The return value n is the length of p; err is always nil. If the +// buffer becomes too large, Write will panic with bytes.ErrTooLarge. +func (b *Buffer) Write(p []byte) (n int, err error) { + m := b.grow(len(p)) + return copy(b.buf[m:], p), nil +} + +// MinRead is the minimum slice size passed to a Read call by +// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond +// what is required to hold the contents of r, ReadFrom will not grow the +// underlying buffer. +const MinRead = 512 + +// ReadFrom reads data from r until EOF and appends it to the buffer, growing +// the buffer as needed. The return value n is the number of bytes read. Any +// error except io.EOF encountered during the read is also returned. If the +// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge. +func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { + // If buffer is empty, reset to recover space. 
+ if b.off >= len(b.buf) { + b.Truncate(0) + } + for { + if free := cap(b.buf) - len(b.buf); free < MinRead { + // not enough space at end + newBuf := b.buf + if b.off+free < MinRead { + // not enough space using beginning of buffer; + // double buffer capacity + newBuf = makeSlice(2*cap(b.buf) + MinRead) + } + copy(newBuf, b.buf[b.off:]) + b.buf = newBuf[:len(b.buf)-b.off] + b.off = 0 + } + m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) + b.buf = b.buf[0 : len(b.buf)+m] + n += int64(m) + if e == io.EOF { + break + } + if e != nil { + return n, e + } + } + return n, nil // err is EOF, so return nil explicitly +} + +// makeSlice allocates a slice of size n. If the allocation fails, it panics +// with bytes.ErrTooLarge. +func makeSlice(n int) []byte { + // If the make fails, give a known error. + defer func() { + if recover() != nil { + panic(bytes.ErrTooLarge) + } + }() + return make([]byte, n) +} + +// WriteTo writes data to w until the buffer is drained or an error occurs. +// The return value n is the number of bytes written; it always fits into an +// int, but it is int64 to match the io.WriterTo interface. Any error +// encountered during the write is also returned. +func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { + if b.off < len(b.buf) { + nBytes := b.Len() + m, e := w.Write(b.buf[b.off:]) + if m > nBytes { + panic("leveldb/util.Buffer.WriteTo: invalid Write count") + } + b.off += m + n = int64(m) + if e != nil { + return n, e + } + // all bytes should have been written, by definition of + // Write method in io.Writer + if m != nBytes { + return n, io.ErrShortWrite + } + } + // Buffer is now empty; reset. + b.Truncate(0) + return +} + +// WriteByte appends the byte c to the buffer, growing the buffer as needed. +// The returned error is always nil, but is included to match bufio.Writer's +// WriteByte. If the buffer becomes too large, WriteByte will panic with +// bytes.ErrTooLarge. +func (b *Buffer) WriteByte(c byte) error { + m := b.grow(1) + b.buf[m] = c + return nil +} + +// Read reads the next len(p) bytes from the buffer or until the buffer +// is drained. The return value n is the number of bytes read. If the +// buffer has no data to return, err is io.EOF (unless len(p) is zero); +// otherwise it is nil. +func (b *Buffer) Read(p []byte) (n int, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + if len(p) == 0 { + return + } + return 0, io.EOF + } + n = copy(p, b.buf[b.off:]) + b.off += n + return +} + +// Next returns a slice containing the next n bytes from the buffer, +// advancing the buffer as if the bytes had been returned by Read. +// If there are fewer than n bytes in the buffer, Next returns the entire buffer. +// The slice is only valid until the next call to a read or write method. +func (b *Buffer) Next(n int) []byte { + m := b.Len() + if n > m { + n = m + } + data := b.buf[b.off : b.off+n] + b.off += n + return data +} + +// ReadByte reads and returns the next byte from the buffer. +// If no byte is available, it returns error io.EOF. +func (b *Buffer) ReadByte() (c byte, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, io.EOF + } + c = b.buf[b.off] + b.off++ + return c, nil +} + +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. 
+// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. +func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { + slice, err := b.readSlice(delim) + // return a copy of slice. The buffer's backing array may + // be overwritten by later calls. + line = append(line, slice...) + return +} + +// readSlice is like ReadBytes but returns a reference to internal buffer data. +func (b *Buffer) readSlice(delim byte) (line []byte, err error) { + i := bytes.IndexByte(b.buf[b.off:], delim) + end := b.off + i + 1 + if i < 0 { + end = len(b.buf) + err = io.EOF + } + line = b.buf[b.off:end] + b.off = end + return line, err +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial +// contents. It is intended to prepare a Buffer to read existing data. It +// can also be used to size the internal buffer for writing. To do that, +// buf should have the desired capacity but a length of zero. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go new file mode 100644 index 000000000..554e28ebd --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go @@ -0,0 +1,205 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +import ( + "fmt" + "sync/atomic" + "time" +) + +type buffer struct { + b []byte + miss int +} + +// BufferPool is a 'buffer pool'. +type BufferPool struct { + pool [6]chan []byte + size [5]uint32 + sizeMiss [5]uint32 + sizeHalf [5]uint32 + baseline [4]int + baselinex0 int + baselinex1 int + baseline0 int + baseline1 int + baseline2 int + close chan struct{} + + get uint32 + put uint32 + half uint32 + less uint32 + equal uint32 + greater uint32 + miss uint32 +} + +func (p *BufferPool) poolNum(n int) int { + if n <= p.baseline0 && n > p.baseline0/2 { + return 0 + } + for i, x := range p.baseline { + if n <= x { + return i + 1 + } + } + return len(p.baseline) + 1 +} + +// Get returns buffer with length of n. +func (p *BufferPool) Get(n int) []byte { + atomic.AddUint32(&p.get, 1) + + poolNum := p.poolNum(n) + pool := p.pool[poolNum] + if poolNum == 0 { + // Fast path. 
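+		// A pooled buffer is reused only when its capacity is at least n but
+		// less than twice n; an oversized buffer is put back (or dropped) and a
+		// fresh slice is allocated so small requests do not pin large buffers.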
+ select { + case b := <-pool: + switch { + case cap(b) > n: + if cap(b)-n >= n { + atomic.AddUint32(&p.half, 1) + select { + case pool <- b: + default: + } + return make([]byte, n) + } else { + atomic.AddUint32(&p.less, 1) + return b[:n] + } + case cap(b) == n: + atomic.AddUint32(&p.equal, 1) + return b[:n] + default: + atomic.AddUint32(&p.greater, 1) + } + default: + atomic.AddUint32(&p.miss, 1) + } + + return make([]byte, n, p.baseline0) + } else { + sizePtr := &p.size[poolNum-1] + + select { + case b := <-pool: + switch { + case cap(b) > n: + if cap(b)-n >= n { + atomic.AddUint32(&p.half, 1) + sizeHalfPtr := &p.sizeHalf[poolNum-1] + if atomic.AddUint32(sizeHalfPtr, 1) == 20 { + atomic.StoreUint32(sizePtr, uint32(cap(b)/2)) + atomic.StoreUint32(sizeHalfPtr, 0) + } else { + select { + case pool <- b: + default: + } + } + return make([]byte, n) + } else { + atomic.AddUint32(&p.less, 1) + return b[:n] + } + case cap(b) == n: + atomic.AddUint32(&p.equal, 1) + return b[:n] + default: + atomic.AddUint32(&p.greater, 1) + if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) { + select { + case pool <- b: + default: + } + } + } + default: + atomic.AddUint32(&p.miss, 1) + } + + if size := atomic.LoadUint32(sizePtr); uint32(n) > size { + if size == 0 { + atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n)) + } else { + sizeMissPtr := &p.sizeMiss[poolNum-1] + if atomic.AddUint32(sizeMissPtr, 1) == 20 { + atomic.StoreUint32(sizePtr, uint32(n)) + atomic.StoreUint32(sizeMissPtr, 0) + } + } + return make([]byte, n) + } else { + return make([]byte, n, size) + } + } +} + +// Put adds given buffer to the pool. +func (p *BufferPool) Put(b []byte) { + atomic.AddUint32(&p.put, 1) + + pool := p.pool[p.poolNum(cap(b))] + select { + case pool <- b: + default: + } + +} + +func (p *BufferPool) Close() { + select { + case p.close <- struct{}{}: + default: + } +} + +func (p *BufferPool) String() string { + return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}", + p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss) +} + +func (p *BufferPool) drain() { + ticker := time.NewTicker(2 * time.Second) + for { + select { + case <-ticker.C: + for _, ch := range p.pool { + select { + case <-ch: + default: + } + } + case <-p.close: + for _, ch := range p.pool { + close(ch) + } + return + } + } +} + +// NewBufferPool creates a new initialized 'buffer pool'. +func NewBufferPool(baseline int) *BufferPool { + if baseline <= 0 { + panic("baseline can't be <= 0") + } + p := &BufferPool{ + baseline0: baseline, + baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4}, + close: make(chan struct{}, 1), + } + for i, cap := range []int{2, 2, 4, 4, 2, 1} { + p.pool[i] = make(chan []byte, cap) + } + go p.drain() + return p +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go new file mode 100644 index 000000000..87d96739c --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go @@ -0,0 +1,369 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package util + +import ( + "bytes" + "io" + "math/rand" + "runtime" + "testing" +) + +const N = 10000 // make this bigger for a larger (and slower) test +var data string // test data for write tests +var testBytes []byte // test data; same as data but as a slice. + +func init() { + testBytes = make([]byte, N) + for i := 0; i < N; i++ { + testBytes[i] = 'a' + byte(i%26) + } + data = string(testBytes) +} + +// Verify that contents of buf match the string s. +func check(t *testing.T, testname string, buf *Buffer, s string) { + bytes := buf.Bytes() + str := buf.String() + if buf.Len() != len(bytes) { + t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes)) + } + + if buf.Len() != len(str) { + t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str)) + } + + if buf.Len() != len(s) { + t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s)) + } + + if string(bytes) != s { + t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s) + } +} + +// Fill buf through n writes of byte slice fub. +// The initial contents of buf corresponds to the string s; +// the result is the final contents of buf returned as a string. +func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string { + check(t, testname+" (fill 1)", buf, s) + for ; n > 0; n-- { + m, err := buf.Write(fub) + if m != len(fub) { + t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub)) + } + if err != nil { + t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) + } + s += string(fub) + check(t, testname+" (fill 4)", buf, s) + } + return s +} + +func TestNewBuffer(t *testing.T) { + buf := NewBuffer(testBytes) + check(t, "NewBuffer", buf, data) +} + +// Empty buf through repeated reads into fub. +// The initial contents of buf corresponds to the string s. 
+func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) { + check(t, testname+" (empty 1)", buf, s) + + for { + n, err := buf.Read(fub) + if n == 0 { + break + } + if err != nil { + t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err) + } + s = s[n:] + check(t, testname+" (empty 3)", buf, s) + } + + check(t, testname+" (empty 4)", buf, "") +} + +func TestBasicOperations(t *testing.T) { + var buf Buffer + + for i := 0; i < 5; i++ { + check(t, "TestBasicOperations (1)", &buf, "") + + buf.Reset() + check(t, "TestBasicOperations (2)", &buf, "") + + buf.Truncate(0) + check(t, "TestBasicOperations (3)", &buf, "") + + n, err := buf.Write([]byte(data[0:1])) + if n != 1 { + t.Errorf("wrote 1 byte, but n == %d", n) + } + if err != nil { + t.Errorf("err should always be nil, but err == %s", err) + } + check(t, "TestBasicOperations (4)", &buf, "a") + + buf.WriteByte(data[1]) + check(t, "TestBasicOperations (5)", &buf, "ab") + + n, err = buf.Write([]byte(data[2:26])) + if n != 24 { + t.Errorf("wrote 25 bytes, but n == %d", n) + } + check(t, "TestBasicOperations (6)", &buf, string(data[0:26])) + + buf.Truncate(26) + check(t, "TestBasicOperations (7)", &buf, string(data[0:26])) + + buf.Truncate(20) + check(t, "TestBasicOperations (8)", &buf, string(data[0:20])) + + empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5)) + empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100)) + + buf.WriteByte(data[1]) + c, err := buf.ReadByte() + if err != nil { + t.Error("ReadByte unexpected eof") + } + if c != data[1] { + t.Errorf("ReadByte wrong value c=%v", c) + } + c, err = buf.ReadByte() + if err == nil { + t.Error("ReadByte unexpected not eof") + } + } +} + +func TestLargeByteWrites(t *testing.T) { + var buf Buffer + limit := 30 + if testing.Short() { + limit = 9 + } + for i := 3; i < limit; i += 3 { + s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes) + empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i)) + } + check(t, "TestLargeByteWrites (3)", &buf, "") +} + +func TestLargeByteReads(t *testing.T) { + var buf Buffer + for i := 3; i < 30; i += 3 { + s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) + empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) + } + check(t, "TestLargeByteReads (3)", &buf, "") +} + +func TestMixedReadsAndWrites(t *testing.T) { + var buf Buffer + s := "" + for i := 0; i < 50; i++ { + wlen := rand.Intn(len(data)) + s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen]) + rlen := rand.Intn(len(data)) + fub := make([]byte, rlen) + n, _ := buf.Read(fub) + s = s[n:] + } + empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len())) +} + +func TestNil(t *testing.T) { + var b *Buffer + if b.String() != "" { + t.Errorf("expected ; got %q", b.String()) + } +} + +func TestReadFrom(t *testing.T) { + var buf Buffer + for i := 3; i < 30; i += 3 { + s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) + var b Buffer + b.ReadFrom(&buf) + empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data))) + } +} + +func TestWriteTo(t *testing.T) { + var buf Buffer + for i := 3; i < 30; i += 3 { + s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) + var b Buffer + buf.WriteTo(&b) + empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data))) + } +} + +func TestNext(t *testing.T) { + b := []byte{0, 1, 2, 3, 4} + tmp := make([]byte, 5) + for i := 0; i <= 5; 
i++ { + for j := i; j <= 5; j++ { + for k := 0; k <= 6; k++ { + // 0 <= i <= j <= 5; 0 <= k <= 6 + // Check that if we start with a buffer + // of length j at offset i and ask for + // Next(k), we get the right bytes. + buf := NewBuffer(b[0:j]) + n, _ := buf.Read(tmp[0:i]) + if n != i { + t.Fatalf("Read %d returned %d", i, n) + } + bb := buf.Next(k) + want := k + if want > j-i { + want = j - i + } + if len(bb) != want { + t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb)) + } + for l, v := range bb { + if v != byte(l+i) { + t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i) + } + } + } + } + } +} + +var readBytesTests = []struct { + buffer string + delim byte + expected []string + err error +}{ + {"", 0, []string{""}, io.EOF}, + {"a\x00", 0, []string{"a\x00"}, nil}, + {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil}, + {"hello\x01world", 1, []string{"hello\x01"}, nil}, + {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF}, + {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil}, + {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF}, +} + +func TestReadBytes(t *testing.T) { + for _, test := range readBytesTests { + buf := NewBuffer([]byte(test.buffer)) + var err error + for _, expected := range test.expected { + var bytes []byte + bytes, err = buf.ReadBytes(test.delim) + if string(bytes) != expected { + t.Errorf("expected %q, got %q", expected, bytes) + } + if err != nil { + break + } + } + if err != test.err { + t.Errorf("expected error %v, got %v", test.err, err) + } + } +} + +func TestGrow(t *testing.T) { + x := []byte{'x'} + y := []byte{'y'} + tmp := make([]byte, 72) + for _, startLen := range []int{0, 100, 1000, 10000, 100000} { + xBytes := bytes.Repeat(x, startLen) + for _, growLen := range []int{0, 100, 1000, 10000, 100000} { + buf := NewBuffer(xBytes) + // If we read, this affects buf.off, which is good to test. + readBytes, _ := buf.Read(tmp) + buf.Grow(growLen) + yBytes := bytes.Repeat(y, growLen) + // Check no allocation occurs in write, as long as we're single-threaded. + var m1, m2 runtime.MemStats + runtime.ReadMemStats(&m1) + buf.Write(yBytes) + runtime.ReadMemStats(&m2) + if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs { + t.Errorf("allocation occurred during write") + } + // Check that buffer has correct data. + if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) { + t.Errorf("bad initial data at %d %d", startLen, growLen) + } + if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) { + t.Errorf("bad written data at %d %d", startLen, growLen) + } + } + } +} + +// Was a bug: used to give EOF reading empty slice at EOF. +func TestReadEmptyAtEOF(t *testing.T) { + b := new(Buffer) + slice := make([]byte, 0) + n, err := b.Read(slice) + if err != nil { + t.Errorf("read error: %v", err) + } + if n != 0 { + t.Errorf("wrong count; got %d want 0", n) + } +} + +// Tests that we occasionally compact. Issue 5154. +func TestBufferGrowth(t *testing.T) { + var b Buffer + buf := make([]byte, 1024) + b.Write(buf[0:1]) + var cap0 int + for i := 0; i < 5<<10; i++ { + b.Write(buf) + b.Read(buf) + if i == 0 { + cap0 = cap(b.buf) + } + } + cap1 := cap(b.buf) + // (*Buffer).grow allows for 2x capacity slop before sliding, + // so set our error threshold at 3x. + if cap1 > cap0*3 { + t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0) + } +} + +// From Issue 5154. 
+func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { + buf := make([]byte, 1024) + for i := 0; i < b.N; i++ { + var b Buffer + b.Write(buf[0:1]) + for i := 0; i < 5<<10; i++ { + b.Write(buf) + b.Read(buf) + } + } +} + +// Check that we don't compact too often. From Issue 5154. +func BenchmarkBufferFullSmallReads(b *testing.B) { + buf := make([]byte, 1024) + for i := 0; i < b.N; i++ { + var b Buffer + b.Write(buf) + for b.Len()+20 < cap(b.buf) { + b.Write(buf[:10]) + } + for i := 0; i < 5<<10; i++ { + b.Read(buf[:1]) + b.Write(buf[:1]) + } + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go new file mode 100644 index 000000000..631c9d610 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go @@ -0,0 +1,30 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +import ( + "hash/crc32" +) + +var table = crc32.MakeTable(crc32.Castagnoli) + +// CRC is a CRC-32 checksum computed using Castagnoli's polynomial. +type CRC uint32 + +// NewCRC creates a new crc based on the given bytes. +func NewCRC(b []byte) CRC { + return CRC(0).Update(b) +} + +// Update updates the crc with the given bytes. +func (c CRC) Update(b []byte) CRC { + return CRC(crc32.Update(uint32(c), table, b)) +} + +// Value returns a masked crc. +func (c CRC) Value() uint32 { + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go new file mode 100644 index 000000000..54903660f --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go @@ -0,0 +1,48 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +import ( + "bytes" + "encoding/binary" +) + +// Hash return hash of the given data. +func Hash(data []byte, seed uint32) uint32 { + // Similar to murmur hash + var m uint32 = 0xc6a4a793 + var r uint32 = 24 + h := seed ^ (uint32(len(data)) * m) + + buf := bytes.NewBuffer(data) + for buf.Len() >= 4 { + var w uint32 + binary.Read(buf, binary.LittleEndian, &w) + h += w + h *= m + h ^= (h >> 16) + } + + rest := buf.Bytes() + switch len(rest) { + default: + panic("not reached") + case 3: + h += uint32(rest[2]) << 16 + fallthrough + case 2: + h += uint32(rest[1]) << 8 + fallthrough + case 1: + h += uint32(rest[0]) + h *= m + h ^= (h >> r) + case 0: + } + + return h +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go new file mode 100644 index 000000000..1f7fdd41f --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go @@ -0,0 +1,21 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +// +build go1.3 + +package util + +import ( + "sync" +) + +type Pool struct { + sync.Pool +} + +func NewPool(cap int) *Pool { + return &Pool{} +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go new file mode 100644 index 000000000..27b8d03be --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go @@ -0,0 +1,33 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// +build !go1.3 + +package util + +type Pool struct { + pool chan interface{} +} + +func (p *Pool) Get() interface{} { + select { + case x := <-p.pool: + return x + default: + return nil + } +} + +func (p *Pool) Put(x interface{}) { + select { + case p.pool <- x: + default: + } +} + +func NewPool(cap int) *Pool { + return &Pool{pool: make(chan interface{}, cap)} +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go new file mode 100644 index 000000000..85159583d --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go @@ -0,0 +1,32 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +// Range is a key range. +type Range struct { + // Start of the key range, included in the range. + Start []byte + + // Limit of the key range, not included in the range. + Limit []byte +} + +// BytesPrefix returns a key range that satisfies the given prefix. +// This is only applicable to the standard 'bytes comparer'. +func BytesPrefix(prefix []byte) *Range { + var limit []byte + for i := len(prefix) - 1; i >= 0; i-- { + c := prefix[i] + if c < 0xff { + limit = make([]byte, i+1) + copy(limit, prefix) + limit[i] = c + 1 + break + } + } + return &Range{prefix, limit} +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go new file mode 100644 index 000000000..229c7d41f --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go @@ -0,0 +1,49 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package util provides utilities used throughout leveldb. +package util + +import ( + "errors" +) + +var ( + ErrNotFound = errors.New("leveldb: not found") +) + +// Releaser is the interface that wraps the basic Release method. +type Releaser interface { + // Release releases associated resources. Release should always succeed + // and can be called multiple times without causing an error. + Release() +} + +// ReleaseSetter is the interface that wraps the basic SetReleaser method. +type ReleaseSetter interface { + // SetReleaser associates the given releaser with the resources. The + // releaser will be called once the corresponding resources are released. + // Calling SetReleaser with nil will clear the releaser. + SetReleaser(releaser Releaser) +} + +// BasicReleaser provides a basic implementation of Releaser and ReleaseSetter. +type BasicReleaser struct { + releaser Releaser +} + +// Release implements Releaser.Release.
+func (r *BasicReleaser) Release() { + if r.releaser != nil { + r.releaser.Release() + r.releaser = nil + } +} + +// SetReleaser implements ReleaseSetter.SetReleaser. +func (r *BasicReleaser) SetReleaser(releaser Releaser) { + r.releaser = releaser +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go new file mode 100644 index 000000000..3ab8daf4b --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go @@ -0,0 +1,436 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "sync/atomic" + "unsafe" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" +) + +var levelMaxSize [kNumLevels]float64 + +func init() { + // Precompute max size of each level + for level := range levelMaxSize { + res := float64(10 * 1048576) + for n := level; n > 1; n-- { + res *= 10 + } + levelMaxSize[level] = res + } +} + +type tSet struct { + level int + table *tFile +} + +type version struct { + s *session + + tables [kNumLevels]tFiles + + // Level that should be compacted next and its compaction score. + // Score < 1 means compaction is not strictly needed. These fields + // are initialized by computeCompaction() + cLevel int + cScore float64 + + cSeek unsafe.Pointer + + ref int + next *version +} + +func (v *version) release_NB() { + v.ref-- + if v.ref > 0 { + return + } + if v.ref < 0 { + panic("negative version ref") + } + + tables := make(map[uint64]bool) + for _, tt := range v.next.tables { + for _, t := range tt { + num := t.file.Num() + tables[num] = true + } + } + + for _, tt := range v.tables { + for _, t := range tt { + num := t.file.Num() + if _, ok := tables[num]; !ok { + v.s.tops.remove(t) + } + } + } + + v.next.release_NB() + v.next = nil +} + +func (v *version) release() { + v.s.vmu.Lock() + v.release_NB() + v.s.vmu.Unlock() +} + +func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) { + ukey := ikey.ukey() + + // Walk tables level-by-level. + for level, tables := range v.tables { + if len(tables) == 0 { + continue + } + + if level == 0 { + // Level-0 files may overlap each other. Find all files that + // overlap ukey. + for _, t := range tables { + if t.overlaps(v.s.icmp, ukey, ukey) { + if !f(level, t) { + return + } + } + } + } else { + if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) { + t := tables[i] + if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { + if !f(level, t) { + return + } + } + } + } + + if lf != nil && !lf(level) { + return + } + } +} + +func (v *version) get(ikey iKey, ro *opt.ReadOptions) (value []byte, tcomp bool, err error) { + ukey := ikey.ukey() + + var ( + tset *tSet + tseek bool + + l0found bool + l0seq uint64 + l0vt vType + l0val []byte + ) + + err = ErrNotFound + + // Since entries never hope across level, finding key/value + // in smaller level make later levels irrelevant. 
+ v.walkOverlapping(ikey, func(level int, t *tFile) bool { + if !tseek { + if tset == nil { + tset = &tSet{level, t} + } else if tset.table.consumeSeek() <= 0 { + tseek = true + tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) + } + } + + ikey__, val_, err_ := v.s.tops.find(t, ikey, ro) + switch err_ { + case nil: + case ErrNotFound: + return true + default: + err = err_ + return false + } + + ikey_ := iKey(ikey__) + if seq, vt, ok := ikey_.parseNum(); ok { + if v.s.icmp.uCompare(ukey, ikey_.ukey()) != 0 { + return true + } + + if level == 0 { + if seq >= l0seq { + l0found = true + l0seq = seq + l0vt = vt + l0val = val_ + } + } else { + switch vt { + case tVal: + value = val_ + err = nil + case tDel: + default: + panic("leveldb: invalid internal key type") + } + return false + } + } else { + err = errors.New("leveldb: internal key corrupted") + return false + } + + return true + }, func(level int) bool { + if l0found { + switch l0vt { + case tVal: + value = l0val + err = nil + case tDel: + default: + panic("leveldb: invalid internal key type") + } + return false + } + + return true + }) + + return +} + +func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { + // Merge all level zero files together since they may overlap + for _, t := range v.tables[0] { + it := v.s.tops.newIterator(t, slice, ro) + its = append(its, it) + } + + strict := v.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator) + for _, tables := range v.tables[1:] { + if len(tables) == 0 { + continue + } + + it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict, true) + its = append(its, it) + } + + return +} + +func (v *version) newStaging() *versionStaging { + return &versionStaging{base: v} +} + +// Spawn a new version based on this version. +func (v *version) spawn(r *sessionRecord) *version { + staging := v.newStaging() + staging.commit(r) + return staging.finish() +} + +func (v *version) fillRecord(r *sessionRecord) { + for level, ts := range v.tables { + for _, t := range ts { + r.addTableFile(level, t) + } + } +} + +func (v *version) tLen(level int) int { + return len(v.tables[level]) +} + +func (v *version) offsetOf(ikey iKey) (n uint64, err error) { + for level, tables := range v.tables { + for _, t := range tables { + if v.s.icmp.Compare(t.imax, ikey) <= 0 { + // Entire file is before "ikey", so just add the file size + n += t.size + } else if v.s.icmp.Compare(t.imin, ikey) > 0 { + // Entire file is after "ikey", so ignore + if level > 0 { + // Files other than level 0 are sorted by meta->min, so + // no further files in this level will contain data for + // "ikey". + break + } + } else { + // "ikey" falls in the range for this table. Add the + // approximate offset of "ikey" within the table. 
+ var nn uint64 + nn, err = v.s.tops.offsetOf(t, ikey) + if err != nil { + return 0, err + } + n += nn + } + } + } + + return +} + +func (v *version) pickLevel(umin, umax []byte) (level int) { + if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) { + var overlaps tFiles + for ; level < kMaxMemCompactLevel; level++ { + if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) { + break + } + overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false) + if overlaps.size() > kMaxGrandParentOverlapBytes { + break + } + } + } + + return +} + +func (v *version) computeCompaction() { + // Precomputed best level for next compaction + var bestLevel int = -1 + var bestScore float64 = -1 + + for level, tables := range v.tables { + var score float64 + if level == 0 { + // We treat level-0 specially by bounding the number of files + // instead of number of bytes for two reasons: + // + // (1) With larger write-buffer sizes, it is nice not to do too + // many level-0 compactions. + // + // (2) The files in level-0 are merged on every read and + // therefore we wish to avoid too many files when the individual + // file size is small (perhaps because of a small write-buffer + // setting, or very high compression ratios, or lots of + // overwrites/deletions). + score = float64(len(tables)) / kL0_CompactionTrigger + } else { + score = float64(tables.size()) / levelMaxSize[level] + } + + if score > bestScore { + bestLevel = level + bestScore = score + } + } + + v.cLevel = bestLevel + v.cScore = bestScore +} + +func (v *version) needCompaction() bool { + return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil +} + +type versionStaging struct { + base *version + tables [kNumLevels]struct { + added map[uint64]ntRecord + deleted map[uint64]struct{} + } +} + +func (p *versionStaging) commit(r *sessionRecord) { + // Deleted tables. + for _, r := range r.deletedTables { + tm := &(p.tables[r.level]) + + if len(p.base.tables[r.level]) > 0 { + if tm.deleted == nil { + tm.deleted = make(map[uint64]struct{}) + } + tm.deleted[r.num] = struct{}{} + } + + if tm.added != nil { + delete(tm.added, r.num) + } + } + + // New tables. + for _, r := range r.addedTables { + tm := &(p.tables[r.level]) + + if tm.added == nil { + tm.added = make(map[uint64]ntRecord) + } + tm.added[r.num] = r + + if tm.deleted != nil { + delete(tm.deleted, r.num) + } + } +} + +func (p *versionStaging) finish() *version { + // Build new version. + nv := &version{s: p.base.s} + for level, tm := range p.tables { + btables := p.base.tables[level] + + n := len(btables) + len(tm.added) - len(tm.deleted) + if n < 0 { + n = 0 + } + nt := make(tFiles, 0, n) + + // Base tables. + for _, t := range btables { + if _, ok := tm.deleted[t.file.Num()]; ok { + continue + } + if _, ok := tm.added[t.file.Num()]; ok { + continue + } + nt = append(nt, t) + } + + // New tables. + for _, r := range tm.added { + nt = append(nt, r.makeFile(p.base.s)) + } + + // Sort tables. + if level == 0 { + nt.sortByNum() + } else { + nt.sortByKey(p.base.s.icmp) + } + nv.tables[level] = nt + } + + // Compute compaction score for new version. 
+ nv.computeCompaction() + + return nv +} + +type versionReleaser struct { + v *version + once bool +} + +func (vr *versionReleaser) Release() { + v := vr.v + v.s.vmu.Lock() + if !vr.once { + v.release_NB() + vr.once = true + } + v.s.vmu.Unlock() +} diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..a6496bd45 --- /dev/null +++ b/Makefile @@ -0,0 +1,11 @@ +all: + # no-op + +godep: + go get github.com/tools/godep + +# saves/vendors third-party dependencies to Godeps/_workspace +# -r flag rewrites import paths to use the vendored path +# ./... performs operation on all packages in tree +vendor: godep + godep save -r ./... diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index b8ff1584f..22f97514c 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -1,7 +1,7 @@ package bitswap import ( - "code.google.com/p/goprotobuf/proto" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" blocks "github.com/jbenet/go-ipfs/blocks" peer "github.com/jbenet/go-ipfs/peer" routing "github.com/jbenet/go-ipfs/routing" @@ -9,7 +9,7 @@ import ( swarm "github.com/jbenet/go-ipfs/swarm" u "github.com/jbenet/go-ipfs/util" - ds "github.com/jbenet/datastore.go" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" "time" ) diff --git a/bitswap/message.pb.go b/bitswap/message.pb.go index 5f15d98f5..a340ca073 100644 --- a/bitswap/message.pb.go +++ b/bitswap/message.pb.go @@ -13,7 +13,7 @@ It has these top-level messages: */ package bitswap -import proto "code.google.com/p/goprotobuf/proto" +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. diff --git a/blocks/blocks.go b/blocks/blocks.go index 45aee6ab2..b514f85d9 100644 --- a/blocks/blocks.go +++ b/blocks/blocks.go @@ -1,8 +1,8 @@ package blocks import ( + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" u "github.com/jbenet/go-ipfs/util" - mh "github.com/jbenet/go-multihash" ) // Block is the ipfs blocks service. It is the way diff --git a/blockservice/blocks_test.go b/blockservice/blocks_test.go index 28034b711..c610fbd2a 100644 --- a/blockservice/blocks_test.go +++ b/blockservice/blocks_test.go @@ -4,7 +4,7 @@ import ( "bytes" "testing" - ds "github.com/jbenet/datastore.go" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" blocks "github.com/jbenet/go-ipfs/blocks" u "github.com/jbenet/go-ipfs/util" ) diff --git a/blockservice/blockservice.go b/blockservice/blockservice.go index 9c114a5d6..8f923c76b 100644 --- a/blockservice/blockservice.go +++ b/blockservice/blockservice.go @@ -4,12 +4,12 @@ import ( "fmt" "time" - ds "github.com/jbenet/datastore.go" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" bitswap "github.com/jbenet/go-ipfs/bitswap" blocks "github.com/jbenet/go-ipfs/blocks" u "github.com/jbenet/go-ipfs/util" - mh "github.com/jbenet/go-multihash" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" ) // BlockService is a block datastore. 
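As an illustration of how code in this tree can consume the goleveldb helpers vendored above, a minimal sketch follows. The file and values are hypothetical and not part of this patch; only the import path and the NewCRC/Update/Value and BytesPrefix signatures come from the vendored sources shown earlier.

package main

import (
	"fmt"

	util "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	// Masked Castagnoli CRC over a payload, per the vendored crc32.go.
	sum := util.NewCRC([]byte("block-data")).Value()
	fmt.Printf("masked crc: 0x%08x\n", sum)

	// Half-open key range covering every key with the prefix "blk/",
	// per the vendored range.go (Limit is exclusive).
	r := util.BytesPrefix([]byte("blk/"))
	fmt.Printf("scan [%q, %q)\n", r.Start, r.Limit)
}
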
diff --git a/cmd/ipfs/add.go b/cmd/ipfs/add.go index ad28490ac..85f2d6836 100644 --- a/cmd/ipfs/add.go +++ b/cmd/ipfs/add.go @@ -6,8 +6,8 @@ import ( "os" "path/filepath" - "github.com/gonuts/flag" - "github.com/jbenet/commander" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" core "github.com/jbenet/go-ipfs/core" importer "github.com/jbenet/go-ipfs/importer" dag "github.com/jbenet/go-ipfs/merkledag" diff --git a/cmd/ipfs/cat.go b/cmd/ipfs/cat.go index 19b9fd346..4e6dd44b7 100644 --- a/cmd/ipfs/cat.go +++ b/cmd/ipfs/cat.go @@ -5,8 +5,8 @@ import ( "io" "os" - "github.com/gonuts/flag" - "github.com/jbenet/commander" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" dag "github.com/jbenet/go-ipfs/merkledag" u "github.com/jbenet/go-ipfs/util" ) diff --git a/cmd/ipfs/commands.go b/cmd/ipfs/commands.go index 8dccd358a..cc0ed089a 100644 --- a/cmd/ipfs/commands.go +++ b/cmd/ipfs/commands.go @@ -1,7 +1,7 @@ package main import ( - "github.com/jbenet/commander" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" u "github.com/jbenet/go-ipfs/util" "strings" "time" diff --git a/cmd/ipfs/config.go b/cmd/ipfs/config.go index 4d7826a65..a3a10732e 100644 --- a/cmd/ipfs/config.go +++ b/cmd/ipfs/config.go @@ -3,8 +3,8 @@ package main import ( "errors" "fmt" - "github.com/gonuts/flag" - "github.com/jbenet/commander" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" config "github.com/jbenet/go-ipfs/config" u "github.com/jbenet/go-ipfs/util" "io" diff --git a/cmd/ipfs/init.go b/cmd/ipfs/init.go index a123d19eb..77f3fb9f0 100644 --- a/cmd/ipfs/init.go +++ b/cmd/ipfs/init.go @@ -5,8 +5,8 @@ import ( "errors" "os" - "github.com/gonuts/flag" - "github.com/jbenet/commander" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" config "github.com/jbenet/go-ipfs/config" ci "github.com/jbenet/go-ipfs/crypto" identify "github.com/jbenet/go-ipfs/identify" diff --git a/cmd/ipfs/ipfs.go b/cmd/ipfs/ipfs.go index d492298e3..b54766fca 100644 --- a/cmd/ipfs/ipfs.go +++ b/cmd/ipfs/ipfs.go @@ -4,8 +4,8 @@ import ( "fmt" "os" - "github.com/gonuts/flag" - "github.com/jbenet/commander" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" config "github.com/jbenet/go-ipfs/config" core "github.com/jbenet/go-ipfs/core" u "github.com/jbenet/go-ipfs/util" diff --git a/cmd/ipfs/ls.go b/cmd/ipfs/ls.go index 6a4270ce9..399b34fd3 100644 --- a/cmd/ipfs/ls.go +++ b/cmd/ipfs/ls.go @@ -1,8 +1,8 @@ package main import ( - "github.com/gonuts/flag" - "github.com/jbenet/commander" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" u "github.com/jbenet/go-ipfs/util" ) diff --git a/cmd/ipfs/mount_unix.go b/cmd/ipfs/mount_unix.go index 4f6e34402..6d6fca583 100644 --- a/cmd/ipfs/mount_unix.go +++ b/cmd/ipfs/mount_unix.go @@ -5,8 +5,8 @@ package main import ( "fmt" - "github.com/gonuts/flag" - "github.com/jbenet/commander" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" rofs "github.com/jbenet/go-ipfs/fuse/readonly" u "github.com/jbenet/go-ipfs/util" ) diff --git a/cmd/ipfs/mount_windows.go b/cmd/ipfs/mount_windows.go index 8c3daf8e6..aabf4b4ff 100644 --- a/cmd/ipfs/mount_windows.go +++ b/cmd/ipfs/mount_windows.go @@ -2,8 +2,8 @@ package main import ( "errors" - "github.com/gonuts/flag" - "github.com/jbenet/commander" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" ) var cmdIpfsMount = &commander.Command{ diff --git a/cmd/ipfs/refs.go b/cmd/ipfs/refs.go index 34623252d..0acac37bd 100644 --- a/cmd/ipfs/refs.go +++ b/cmd/ipfs/refs.go @@ -1,11 +1,11 @@ package main import ( - "github.com/gonuts/flag" - "github.com/jbenet/commander" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" mdag "github.com/jbenet/go-ipfs/merkledag" u "github.com/jbenet/go-ipfs/util" - mh "github.com/jbenet/go-multihash" ) var cmdIpfsRefs = &commander.Command{ diff --git a/cmd/ipfs/version.go b/cmd/ipfs/version.go index 3f6fd3b4b..21e87ccc2 100644 --- a/cmd/ipfs/version.go +++ b/cmd/ipfs/version.go @@ -1,7 +1,7 @@ package main import ( - "github.com/jbenet/commander" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" u "github.com/jbenet/go-ipfs/util" ) diff --git a/core/core.go b/core/core.go index 20d21188a..b369f2ffa 100644 --- a/core/core.go +++ b/core/core.go @@ -5,8 +5,9 @@ import ( "errors" "fmt" - ds "github.com/jbenet/datastore.go" - b58 "github.com/jbenet/go-base58" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" "github.com/jbenet/go-ipfs/bitswap" bserv "github.com/jbenet/go-ipfs/blockservice" config "github.com/jbenet/go-ipfs/config" @@ -18,7 +19,6 @@ import ( dht "github.com/jbenet/go-ipfs/routing/dht" swarm "github.com/jbenet/go-ipfs/swarm" u "github.com/jbenet/go-ipfs/util" - ma "github.com/jbenet/go-multiaddr" ) // IpfsNode is IPFS Core module. It represents an IPFS instance. diff --git a/core/datastore.go b/core/datastore.go index 10efc6582..5395e74a4 100644 --- a/core/datastore.go +++ b/core/datastore.go @@ -2,8 +2,8 @@ package core import ( "fmt" - ds "github.com/jbenet/datastore.go" - lds "github.com/jbenet/datastore.go/leveldb" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + lds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb" config "github.com/jbenet/go-ipfs/config" ) diff --git a/crypto/encode.pb.go b/crypto/encode.pb.go index a81e859fd..aa0b9875e 100644 --- a/crypto/encode.pb.go +++ b/crypto/encode.pb.go @@ -14,7 +14,7 @@ It has these top-level messages: */ package crypto -import proto "code.google.com/p/goprotobuf/proto" +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/crypto/key.go b/crypto/key.go index 7e5027750..38b3b0ebd 100644 --- a/crypto/key.go +++ b/crypto/key.go @@ -14,7 +14,7 @@ import ( "hash" "math/big" - "code.google.com/p/goprotobuf/proto" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" ) var ErrBadKeyType = errors.New("invalid or unsupported key type") diff --git a/crypto/rsa.go b/crypto/rsa.go index 31aa69596..513b868d1 100644 --- a/crypto/rsa.go +++ b/crypto/rsa.go @@ -8,7 +8,7 @@ import ( "crypto/x509" "errors" - "code.google.com/p/goprotobuf/proto" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" ) type RsaPrivateKey struct { diff --git a/fuse/readonly/readonly_unix.go b/fuse/readonly/readonly_unix.go index 293a50e73..897c90d78 100644 --- a/fuse/readonly/readonly_unix.go +++ b/fuse/readonly/readonly_unix.go @@ -14,8 +14,8 @@ import ( "syscall" "time" - "bazil.org/fuse" - "bazil.org/fuse/fs" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs" core "github.com/jbenet/go-ipfs/core" mdag "github.com/jbenet/go-ipfs/merkledag" u "github.com/jbenet/go-ipfs/util" diff --git a/identify/identify.go b/identify/identify.go index 017b03c0f..72555144c 100644 --- a/identify/identify.go +++ b/identify/identify.go @@ -16,7 +16,7 @@ import ( "crypto/sha512" "hash" - proto "code.google.com/p/goprotobuf/proto" + proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" ci "github.com/jbenet/go-ipfs/crypto" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" diff --git a/identify/message.pb.go b/identify/message.pb.go index 9313fc1c6..bd373c6e9 100644 --- a/identify/message.pb.go +++ b/identify/message.pb.go @@ -14,7 +14,7 @@ It has these top-level messages: */ package identify -import proto "code.google.com/p/goprotobuf/proto" +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. diff --git a/merkledag/coding.go b/merkledag/coding.go index e27f03acc..45142ac47 100644 --- a/merkledag/coding.go +++ b/merkledag/coding.go @@ -3,7 +3,7 @@ package merkledag import ( "fmt" - mh "github.com/jbenet/go-multihash" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" ) // for now, we use a PBNode intermediate thing. diff --git a/merkledag/dagreader.go b/merkledag/dagreader.go index 5fca2f9f4..0aa0d2606 100644 --- a/merkledag/dagreader.go +++ b/merkledag/dagreader.go @@ -5,7 +5,7 @@ import ( "errors" "io" - proto "code.google.com/p/goprotobuf/proto" + proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" u "github.com/jbenet/go-ipfs/util" ) diff --git a/merkledag/data.pb.go b/merkledag/data.pb.go index 7b85d2903..d2f97d33f 100644 --- a/merkledag/data.pb.go +++ b/merkledag/data.pb.go @@ -13,7 +13,7 @@ It has these top-level messages: */ package merkledag -import proto "code.google.com/p/goprotobuf/proto" +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/merkledag/merkledag.go b/merkledag/merkledag.go index 479959aea..accebb708 100644 --- a/merkledag/merkledag.go +++ b/merkledag/merkledag.go @@ -3,12 +3,12 @@ package merkledag import ( "fmt" - "code.google.com/p/goprotobuf/proto" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" blocks "github.com/jbenet/go-ipfs/blocks" bserv "github.com/jbenet/go-ipfs/blockservice" u "github.com/jbenet/go-ipfs/util" - mh "github.com/jbenet/go-multihash" ) // NodeMap maps u.Keys to Nodes. diff --git a/merkledag/node.pb.go b/merkledag/node.pb.go index bbfdbcdd2..f7925c9d9 100644 --- a/merkledag/node.pb.go +++ b/merkledag/node.pb.go @@ -14,14 +14,14 @@ */ package merkledag -import proto "code.google.com/p/gogoprotobuf/proto" +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" import math "math" // discarding unused import gogoproto "code.google.com/p/gogoprotobuf/gogoproto/gogo.pb" import io "io" import fmt "fmt" -import code_google_com_p_gogoprotobuf_proto "code.google.com/p/gogoprotobuf/proto" +import code_google_com_p_gogoprotobuf_proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" import fmt1 "fmt" import strings "strings" @@ -29,7 +29,7 @@ import reflect "reflect" import fmt2 "fmt" import strings1 "strings" -import code_google_com_p_gogoprotobuf_proto1 "code.google.com/p/gogoprotobuf/proto" +import code_google_com_p_gogoprotobuf_proto1 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" import sort "sort" import strconv "strconv" import reflect1 "reflect" diff --git a/merkledag/nodepb_test.go b/merkledag/nodepb_test.go index b7ef81c3d..103ab986f 100644 --- a/merkledag/nodepb_test.go +++ b/merkledag/nodepb_test.go @@ -17,7 +17,7 @@ package merkledag import testing "testing" import math_rand "math/rand" import time "time" -import code_google_com_p_gogoprotobuf_proto "code.google.com/p/gogoprotobuf/proto" +import code_google_com_p_gogoprotobuf_proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" import testing1 "testing" import math_rand1 "math/rand" import time1 "time" @@ -25,7 +25,7 @@ import encoding_json "encoding/json" import testing2 "testing" import math_rand2 "math/rand" import time2 "time" -import code_google_com_p_gogoprotobuf_proto1 "code.google.com/p/gogoprotobuf/proto" +import code_google_com_p_gogoprotobuf_proto1 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" import math_rand3 "math/rand" import time3 "time" import testing3 "testing" @@ -33,7 +33,7 @@ import fmt "fmt" import math_rand4 "math/rand" import time4 "time" import testing4 "testing" -import code_google_com_p_gogoprotobuf_proto2 "code.google.com/p/gogoprotobuf/proto" +import code_google_com_p_gogoprotobuf_proto2 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" import math_rand5 "math/rand" import time5 "time" import testing5 "testing" @@ -42,7 +42,7 @@ import go_parser "go/parser" import math_rand6 "math/rand" import time6 "time" import testing6 "testing" -import code_google_com_p_gogoprotobuf_proto3 "code.google.com/p/gogoprotobuf/proto" +import code_google_com_p_gogoprotobuf_proto3 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" func TestPBLinkProto(t *testing.T) { popr := 
math_rand.New(math_rand.NewSource(time.Now().UnixNano())) diff --git a/path/path.go b/path/path.go index ffcfd47c2..a06fb98cb 100644 --- a/path/path.go +++ b/path/path.go @@ -5,9 +5,9 @@ import ( "path" "strings" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" merkledag "github.com/jbenet/go-ipfs/merkledag" u "github.com/jbenet/go-ipfs/util" - mh "github.com/jbenet/go-multihash" ) // Resolver provides path resolution to IPFS diff --git a/peer/peer.go b/peer/peer.go index 3f1550e76..1d270450d 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -4,11 +4,11 @@ import ( "sync" "time" - b58 "github.com/jbenet/go-base58" + b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" ic "github.com/jbenet/go-ipfs/crypto" u "github.com/jbenet/go-ipfs/util" - ma "github.com/jbenet/go-multiaddr" - mh "github.com/jbenet/go-multihash" "bytes" ) diff --git a/peer/peer_test.go b/peer/peer_test.go index 133acc221..e254c403d 100644 --- a/peer/peer_test.go +++ b/peer/peer_test.go @@ -1,8 +1,8 @@ package peer import ( - ma "github.com/jbenet/go-multiaddr" - mh "github.com/jbenet/go-multihash" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" "testing" ) diff --git a/routing/dht/Message.go b/routing/dht/Message.go index 4163fb485..21bd26a85 100644 --- a/routing/dht/Message.go +++ b/routing/dht/Message.go @@ -1,7 +1,7 @@ package dht import ( - "code.google.com/p/goprotobuf/proto" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" peer "github.com/jbenet/go-ipfs/peer" ) diff --git a/routing/dht/dht.go b/routing/dht/dht.go index 883fec333..83962e210 100644 --- a/routing/dht/dht.go +++ b/routing/dht/dht.go @@ -12,11 +12,11 @@ import ( swarm "github.com/jbenet/go-ipfs/swarm" u "github.com/jbenet/go-ipfs/util" - ma "github.com/jbenet/go-multiaddr" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" - ds "github.com/jbenet/datastore.go" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" - "code.google.com/p/goprotobuf/proto" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" ) // TODO. 
SEE https://github.com/jbenet/node-ipfs/blob/master/submodules/ipfs-dht/index.js diff --git a/routing/dht/dht_test.go b/routing/dht/dht_test.go index 2997d3797..9e14987d8 100644 --- a/routing/dht/dht_test.go +++ b/routing/dht/dht_test.go @@ -3,13 +3,13 @@ package dht import ( "testing" - ds "github.com/jbenet/datastore.go" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ci "github.com/jbenet/go-ipfs/crypto" identify "github.com/jbenet/go-ipfs/identify" peer "github.com/jbenet/go-ipfs/peer" swarm "github.com/jbenet/go-ipfs/swarm" u "github.com/jbenet/go-ipfs/util" - ma "github.com/jbenet/go-multiaddr" "fmt" "time" diff --git a/routing/dht/ext_test.go b/routing/dht/ext_test.go index 2b5f3ff72..82337bfa6 100644 --- a/routing/dht/ext_test.go +++ b/routing/dht/ext_test.go @@ -5,13 +5,13 @@ import ( crand "crypto/rand" - "code.google.com/p/goprotobuf/proto" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" - ds "github.com/jbenet/datastore.go" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" peer "github.com/jbenet/go-ipfs/peer" swarm "github.com/jbenet/go-ipfs/swarm" u "github.com/jbenet/go-ipfs/util" - ma "github.com/jbenet/go-multiaddr" "time" ) diff --git a/routing/dht/messages.pb.go b/routing/dht/messages.pb.go index a2452dc28..90c936eb9 100644 --- a/routing/dht/messages.pb.go +++ b/routing/dht/messages.pb.go @@ -13,7 +13,7 @@ It has these top-level messages: */ package dht -import proto "code.google.com/p/goprotobuf/proto" +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/routing/dht/routing.go b/routing/dht/routing.go index 31a99def6..383c64a98 100644 --- a/routing/dht/routing.go +++ b/routing/dht/routing.go @@ -6,9 +6,9 @@ import ( "errors" "time" - proto "code.google.com/p/goprotobuf/proto" + proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" - ma "github.com/jbenet/go-multiaddr" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" peer "github.com/jbenet/go-ipfs/peer" kb "github.com/jbenet/go-ipfs/routing/kbucket" diff --git a/swarm/conn.go b/swarm/conn.go index 2283db482..1f2df1658 100644 --- a/swarm/conn.go +++ b/swarm/conn.go @@ -4,10 +4,10 @@ import ( "fmt" "net" + msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" - msgio "github.com/jbenet/go-msgio" - ma "github.com/jbenet/go-multiaddr" ) // ChanBuffer is the size of the buffer in the Conn Chan diff --git a/swarm/conn_test.go b/swarm/conn_test.go index 171d9c38b..952434acf 100644 --- a/swarm/conn_test.go +++ b/swarm/conn_test.go @@ -2,9 +2,9 @@ package swarm import ( "fmt" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" peer "github.com/jbenet/go-ipfs/peer" - ma "github.com/jbenet/go-multiaddr" - mh "github.com/jbenet/go-multihash" "net" "testing" ) diff --git a/swarm/interface.go b/swarm/interface.go index c9655a91c..413a42ee2 100644 --- a/swarm/interface.go +++ b/swarm/interface.go @@ -4,7 +4,7 @@ import ( peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" - ma "github.com/jbenet/go-multiaddr" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ) type Network interface { diff --git a/swarm/mes_wrapper.pb.go b/swarm/mes_wrapper.pb.go index dd277b538..f218a448a 100644 --- a/swarm/mes_wrapper.pb.go +++ b/swarm/mes_wrapper.pb.go @@ -13,7 +13,7 @@ It has these top-level messages: */ package swarm -import proto "code.google.com/p/goprotobuf/proto" +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/swarm/swarm.go b/swarm/swarm.go index 79efd65af..389453703 100644 --- a/swarm/swarm.go +++ b/swarm/swarm.go @@ -6,11 +6,11 @@ import ( "net" "sync" - proto "code.google.com/p/goprotobuf/proto" + proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ident "github.com/jbenet/go-ipfs/identify" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" - ma "github.com/jbenet/go-multiaddr" ) var ErrAlreadyOpen = errors.New("Error: Connection to this peer already open.") diff --git a/swarm/swarm_test.go b/swarm/swarm_test.go index e27ac0b53..e8a7af50d 100644 --- a/swarm/swarm_test.go +++ b/swarm/swarm_test.go @@ -5,9 +5,9 @@ import ( "net" "testing" + msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" - msgio "github.com/jbenet/go-msgio" ) func pingListen(listener *net.TCPListener, peer *peer.Peer) { diff --git a/swarm/wrapper.go b/swarm/wrapper.go index 52ffc7765..469620e8b 100644 --- a/swarm/wrapper.go +++ b/swarm/wrapper.go @@ -1,6 +1,6 @@ package swarm -import "code.google.com/p/goprotobuf/proto" +import "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" func Wrap(data []byte, typ PBWrapper_MessageType) ([]byte, error) { wrapper := new(PBWrapper) diff --git a/util/util.go b/util/util.go index 50eb31e8f..56a80b64b 100644 --- a/util/util.go +++ b/util/util.go @@ -7,8 +7,8 @@ import ( "os/user" "strings" - b58 "github.com/jbenet/go-base58" - mh "github.com/jbenet/go-multihash" + b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" ) // Debug is a global flag for debugging. diff --git a/util/util_test.go b/util/util_test.go index 6a269dbb5..821a97e66 100644 --- a/util/util_test.go +++ b/util/util_test.go @@ -2,7 +2,7 @@ package util import ( "bytes" - mh "github.com/jbenet/go-multihash" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" "testing" )
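The rewritten-path convention shown in the hunks above applies to any new code added to the tree after this change; a minimal hypothetical sketch (not a file in this patch) using the vendored goprotobuf package:

package main

import (
	"fmt"

	proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
)

func main() {
	// proto.String is one of the scalar helpers in the vendored package;
	// it returns a *string for populating optional protobuf fields.
	name := proto.String("ipfs")
	fmt.Println(*name)
}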