
Add Vendor

Luis Figueiredo 8 years ago
parent
commit
25b8bf906d
100 changed files with 20,567 additions and 4 deletions
  1. 0 1
      backend/.gitignore
  2. 4 3
      backend/Makefile
  3. 12 0
      backend/src/vendor/dev.hexasoftware.com/hxs/prettylog.git/global/register.go
  4. 55 0
      backend/src/vendor/dev.hexasoftware.com/hxs/prettylog.git/prettylog.go
  5. 201 0
      backend/src/vendor/github.com/gohxs/hqi/LICENSE
  6. 175 0
      backend/src/vendor/github.com/gohxs/hqi/README.md
  7. 120 0
      backend/src/vendor/github.com/gohxs/hqi/builder.go
  8. 19 0
      backend/src/vendor/github.com/gohxs/hqi/doc/CHANGELOG.md
  9. 165 0
      backend/src/vendor/github.com/gohxs/hqi/doc/IDEAS.md
  10. 16 0
      backend/src/vendor/github.com/gohxs/hqi/driver.go
  11. 136 0
      backend/src/vendor/github.com/gohxs/hqi/drv/driver_test.go
  12. 137 0
      backend/src/vendor/github.com/gohxs/hqi/drv/mgodrv/executor.go
  13. 61 0
      backend/src/vendor/github.com/gohxs/hqi/drv/mgodrv/mgodrv.go
  14. 93 0
      backend/src/vendor/github.com/gohxs/hqi/drv/slicedrv/executor.go
  15. 102 0
      backend/src/vendor/github.com/gohxs/hqi/drv/slicedrv/slicedrv.go
  16. 107 0
      backend/src/vendor/github.com/gohxs/hqi/drv/slicedrv/utils.go
  17. 120 0
      backend/src/vendor/github.com/gohxs/hqi/drv/sqldrv/driver.go
  18. 185 0
      backend/src/vendor/github.com/gohxs/hqi/drv/sqldrv/executor.go
  19. BIN
      backend/src/vendor/github.com/gohxs/hqi/drv/tmp.sqlite3
  20. 31 0
      backend/src/vendor/github.com/gohxs/hqi/executor.go.bak
  21. 141 0
      backend/src/vendor/github.com/gohxs/hqi/query.go
  22. 106 0
      backend/src/vendor/github.com/gohxs/hqi/samples/main.go
  23. 144 0
      backend/src/vendor/github.com/gohxs/hqi/tester/tester.go
  24. 52 0
      backend/src/vendor/github.com/gohxs/hqi/tester/testutils.go
  25. 9 0
      backend/src/vendor/github.com/google/uuid/.travis.yml
  26. 10 0
      backend/src/vendor/github.com/google/uuid/CONTRIBUTING.md
  27. 9 0
      backend/src/vendor/github.com/google/uuid/CONTRIBUTORS
  28. 27 0
      backend/src/vendor/github.com/google/uuid/LICENSE
  29. 23 0
      backend/src/vendor/github.com/google/uuid/README.md
  30. 80 0
      backend/src/vendor/github.com/google/uuid/dce.go
  31. 12 0
      backend/src/vendor/github.com/google/uuid/doc.go
  32. 53 0
      backend/src/vendor/github.com/google/uuid/hash.go
  33. 62 0
      backend/src/vendor/github.com/google/uuid/json_test.go
  34. 39 0
      backend/src/vendor/github.com/google/uuid/marshal.go
  35. 103 0
      backend/src/vendor/github.com/google/uuid/node.go
  36. 66 0
      backend/src/vendor/github.com/google/uuid/seq_test.go
  37. 59 0
      backend/src/vendor/github.com/google/uuid/sql.go
  38. 113 0
      backend/src/vendor/github.com/google/uuid/sql_test.go
  39. 123 0
      backend/src/vendor/github.com/google/uuid/time.go
  40. 43 0
      backend/src/vendor/github.com/google/uuid/util.go
  41. 191 0
      backend/src/vendor/github.com/google/uuid/uuid.go
  42. 526 0
      backend/src/vendor/github.com/google/uuid/uuid_test.go
  43. 44 0
      backend/src/vendor/github.com/google/uuid/version1.go
  44. 38 0
      backend/src/vendor/github.com/google/uuid/version4.go
  45. 45 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/.travis.yml
  46. 25 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/LICENSE
  47. 5 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/Makefile
  48. 4 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/README.md
  49. 467 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/auth.go
  50. 1180 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/auth_test.go
  51. 25 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/LICENSE
  52. 738 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/bson.go
  53. 1832 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/bson_test.go
  54. 310 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/decimal.go
  55. 4109 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/decimal_test.go
  56. 849 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/decode.go
  57. 514 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/encode.go
  58. 380 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/json.go
  59. 184 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/json_test.go
  60. 27 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/specdata/update.sh
  61. 241 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/specdata_test.go
  62. 351 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/bulk.go
  63. 504 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/bulk_test.go
  64. 682 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/cluster.go
  65. 2090 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/cluster_test.go
  66. 196 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/dbtest/dbserver.go
  67. 108 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/dbtest/dbserver_test.go
  68. 12 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/dbtest/export_test.go
  69. 31 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/doc.go
  70. 33 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/export_test.go
  71. 761 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/gridfs.go
  72. 708 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/gridfs_test.go
  73. 20 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/client.crt
  74. 27 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/client.key
  75. 57 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/client.pem
  76. 17 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/client.req
  77. 22 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/server.crt
  78. 28 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/server.key
  79. 50 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/server.pem
  80. 57 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/.env
  81. 0 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg1/db/.empty
  82. BIN
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg1/db/journal/tempLatencyTest
  83. 0 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg1/db/mongod.lock
  84. 3 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg1/log/run
  85. 8 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg1/run
  86. 0 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg2/db/.empty
  87. 3 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg2/log/run
  88. 8 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg2/run
  89. 0 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg3/db/.empty
  90. 3 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg3/log/run
  91. 9 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg3/run
  92. 0 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db1/db/.empty
  93. 3 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db1/log/run
  94. 15 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db1/run
  95. 0 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db2/db/.empty
  96. 3 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db2/log/run
  97. 8 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db2/run
  98. 0 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db3/db/.empty
  99. 3 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db3/log/run
  100. 0 0
      backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db3/run

+ 0 - 1
backend/.gitignore

@@ -1,4 +1,3 @@
 pkg
 deps
 bin
-src/vendor

+ 4 - 3
backend/Makefile

@@ -1,15 +1,16 @@
 
+BIN=../DIST/bin/server
 
 all: build
 
 build:
-	GOPATH=${CURDIR}/deps:${CURDIR} GO_ENABLED=0 go build -o ../DIST/bin/server src/simple-web/simple-web.go
+	GOPATH=${CURDIR} GO_ENABLED=0 go build -o ${BIN} src/simple-web/simple-web.go
 	@#GOPATH=${CURDIR} GO_ENABLED=0 go build -o ../DIST/bin/server src/simple-web/simple-web.go
 
 # Previously	
 deps:
-	@mkdir -p deps
-	GOPATH=${CURDIR}/deps:${CURDIR} go get -v simple-web
+	#@mkdir -p deps
+	#GOPATH=${CURDIR}/deps:${CURDIR} go get -v simple-web
 
 test:
 	cd ../DIST;tmux split "DBHOST=localhost PORT=8080 DEVMODE=1 bin/server"

+ 12 - 0
backend/src/vendor/dev.hexasoftware.com/hxs/prettylog.git/global/register.go

@@ -0,0 +1,12 @@
+package global
+
+import (
+	"log"
+
+	prettylog "dev.hexasoftware.com/hxs/prettylog.git"
+)
+
+func init() {
+	log.SetFlags(0)
+	log.SetOutput(prettylog.New())
+}

+ 55 - 0
backend/src/vendor/dev.hexasoftware.com/hxs/prettylog.git/prettylog.go

@@ -0,0 +1,55 @@
+package prettylog
+
+import (
+	"fmt"
+	"log"
+	"runtime"
+	"strings"
+	"time"
+)
+
+type PrettyLogWritter struct {
+	lastTime time.Time
+	counter  int64
+}
+
+func New() *PrettyLogWritter {
+	return &PrettyLogWritter{time.Now(), 0}
+}
+
+func (this *PrettyLogWritter) Write(b []byte) (int, error) {
+
+	/*{
+		for i := 0; i < 6; i++ {
+			ptr, _, _, _ := runtime.Caller(i)
+			fname := runtime.FuncForPC(ptr).Name()
+			fmt.Println("Stack:", fname)
+		}
+	}*/
+
+	ptr, _, line, _ := runtime.Caller(3)
+	tname := runtime.FuncForPC(ptr).Name()
+	li := strings.LastIndex(tname, "/")
+	fname := tname[li+1:]
+
+	timeDiff := time.Since(this.lastTime)
+
+	var fduration float64 = float64(timeDiff.Nanoseconds()) / 1000000.0
+
+	msg := fmt.Sprintf("[%d:\033[34m%s\033[0m (\033[33m%s:%d\033[0m) \033[90m+%.2f/ms\033[0m]: %s",
+		this.counter,
+		time.Now().Format("2006-01-02 15:04:05"),
+		fname,
+		line,
+		fduration,
+		string(b),
+	)
+	this.lastTime = time.Now()
+	this.counter++
+
+	return fmt.Print(msg)
+}
+
+func CreateLogger() *log.Logger {
+	return log.New(New(), "", 0)
+}
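
For orientation, a minimal sketch of how this vendored logger would be wired into an application, mirroring what `global/register.go` does in its `init` (the log lines are illustrative, not captured output):

```go
package main

import (
	"log"

	prettylog "dev.hexasoftware.com/hxs/prettylog.git"
)

func main() {
	// Route the standard logger through the pretty writer; flags are
	// zeroed because the writer prints its own time/caller prefix.
	log.SetFlags(0)
	log.SetOutput(prettylog.New())
	log.Println("server starting")

	// Or create a dedicated logger instance:
	logger := prettylog.CreateLogger()
	logger.Println("hello")
}
```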

+ 201 - 0
backend/src/vendor/github.com/gohxs/hqi/LICENSE

@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 175 - 0
backend/src/vendor/github.com/gohxs/hqi/README.md

@@ -0,0 +1,175 @@
+hqi 
+==========================
+hexa query interface
+
+
+----
+Stage-based criteria
+It will return the proper stage for a flat query
+
+
+Introduction
+---------------------------------
+hqi aims to interface a flat table query into a common solution
+
+
+- [IDEAS](/doc/IDEAS.md)
+- [CHANGELOG](/doc/CHANGELOG.md)
+
+
+
+
+#### TODO
+
+* Create a driver capability struct so that, when registering, a subDocument driver can tell whether it can process the query
+* Finder features with operators >= <= < > !=
+* Update operation 
+* Matcher with Greater/Smaller checks, zero fields
+* Create a cursor to fetch data in batches
+
+
+#### DONE
+
+* Schemer (information to pass to driver when user wants a scheme) (staging)
+* Insert operation (staging)
+* Delete (staging)
+
+
+
+### Finder
+
+- OR
+	```go
+// will perform obj1 OR obj2
+q.Find(obj1,obj2)   
+```
+- AND
+	```go
+// will perform  f1 = 1 AND f2 = 2
+q.Find(hqi.M{"f1":1,"f2":2})    
+// Will delete like (name = 'aaa' AND value = 5) OR (name = 'bbb')
+q.Find(hqi.M{"name":"aaa","value":5},hqi.M{"name":"bbb"}).Delete()
+```
+
+- Supports complex filters
+- Easy to extend
+
+#### Operations
+
+oper   | function               | equivalence
+-------|------------------------|------------
+NOT    | Inverse match          | !
+GT     | Greater than           | >
+GTE    | Greater than or equal  | >=
+LT     | Less than              | <
+LTE    | Less than or equal     | <=
+
+
+
+
+
+
+Matcher executor
+--------------------------
+* Match
+* Sort
+* Range
+* Retrieve, Count, Delete, Update (CRUD here)
+
+
+Stage based query
+-------------------------
+
+Stage-based means that when we build a query,
+the first stage offers only the Find option/Match.  
+After Find we can have SORT, LIMIT, RESULT;  
+after SORT: LIMIT, RESULT;  
+after RANGE: RESULT.  
+
+* Stage1 return Find/Schema/Insert (Match)
+* Stage2 sort, range, results 
+* Stage3 All/Delete/Count executors
+
+
+#### QuerySampler
+
+rules for Find, if we have a model:
+
+```go
+type Person struct {
+	Name string
+	Age int
+}
+```
+
+we could create a "cloned" Sampler as:
+
+```go
+type PersonSampler struct {
+	Name interface{}
+	Age interface{}
+}
+
+q.FindS(PersonSampler{Age:0}).  // Usually 0 is nothing and won't match
+  List(&res)
+// Or also:
+q.FindS(PersonSampler{Age:hqi.Greater(10)}).
+  List(&res)
+
+```
+
+
+
+
+
+#### Internal operation (done)
+
+Samples could be stored in map[string]interface{} to define fields; executors  
+would read from these fields and do the operations.   
+Right now there is no way for a searcher to compare something to 0.   
+
+#### Executor vs Query
+
+Was experimenting with changing the executor back to one function to make the implementation less complex;
+as it is now, the user must create the driver with some functions and the executor with others.
+A single implementation in one struct would be better.
+
+
+#### Possibilities
+
+Imagine a common DB struct with several data tables
+
+and a Frontend that we would be able to execute standard SQL
+
+```go
+db := hdb.PrepareDB();
+db.Collection("User",mgodrv.Driver{d.DB("dbtest").C("user")})
+db.Collection("Things",slicedrv.Driver{[]MySlice{}})
+db.Collection("orders",sqldrv.Driver{sql.Open("postgres","dsn")})
+db.Collection("invoices",restdrv.Driver{"http://api.domain.tld"})
+
+rows,err := db.Query("select * from User")
+rows.Next()
+
+```
+
+
+
+
+03-02-2017
+----
+### Added
+- Implemented Delete in drivers
+
+### Changed
+- Changed Range to Limit
+- Package is now named hqi; changed the l (language) to an i (interface): (hexasoftware|High|hyper) query interface
+- Sort no longer requires two methods; a "-" prefix on a field means descending (could ">" also mean greater to smaller?):
+  ```go
+//Before:
+q.Find().SortAsc("field1","field2").SortDesc("field3").SortAsc("field4")
+// Now:
+q.Find().Sort("field1","field2","-field3","field4")
+```
+
+

+ 120 - 0
backend/src/vendor/github.com/gohxs/hqi/builder.go

@@ -0,0 +1,120 @@
+package hqi
+
+//Param builder
+
+//SecondStage Second stage where dev can Sort or do the rest
+type (
+	FirstStage interface {
+		Where(samples ...interface{})
+		SecondStage
+	}
+	SecondStage interface { // Or first
+		Sort(...string) ThirdStage
+		ThirdStage
+	}
+	//ThirdStage Third stage to slice data
+	ThirdStage interface {
+		Skip(n int) ThirdStage
+		Max(n int) ThirdStage
+		Limit(skip, max int) FinalStage
+		FinalStage
+	}
+	//FinalStage  the stage to fetch data
+	FinalStage interface {
+		//One(res interface{}) error  // Cursor instead?
+		List(res interface{}) error // cursor instead?
+		Delete() error
+		Count() int
+	}
+
+	//ExecFunc the main handler func
+	//ExecFunc func(qd *BuilderData, res interface{}) error
+
+	// QueryParam flat Query information
+	QueryParam struct {
+		Samples []M
+		Sort    []Field
+		Max     int
+		Skip    int
+	}
+)
+
+/*
+func (bd *BuilderData) Execute(e Executor, res interface{}) error {
+	e.Match(bd.Samples)
+	e.Sort(bd.Sort)
+	e.Range(bd.Skip, bd.Max)
+	e.Retrieve(res)
+	return nil
+}*/
+
+// Builder holds the driver and the accumulated query parameters
+type Builder struct {
+	driver Driver
+	//Executor Factory
+	//Executor DriverFactory
+	data QueryParam
+
+	// Hide this
+	//Driver   Driver
+}
+
+func (b *Builder) Sort(fields ...string) ThirdStage {
+	for _, v := range fields {
+		if v[0] == byte('-') {
+			b.data.Sort = append(b.data.Sort, Field{Name: v[1:], Value: SortDesc})
+			continue
+		}
+		b.data.Sort = append(b.data.Sort, Field{Name: v, Value: SortAsc})
+	}
+	return &*b // NOTE: &*b is the same pointer, not a copy
+}
+
+// Ranger or Skipper,
+func (b *Builder) Skip(n int) ThirdStage {
+	b.data.Skip = n
+	return &*b
+}
+
+// Maxer
+func (b *Builder) Max(n int) ThirdStage {
+	b.data.Max = n
+	return &*b
+}
+
+func (b *Builder) Limit(fi, li int) FinalStage {
+	b.data.Skip = fi
+	if li != 0 {
+		b.data.Max = li - fi
+	}
+	return &*b // NOTE: same pointer; a real copy would be c := *b; &c
+}
+
+func (b *Builder) List(res interface{}) error {
+	return b.driver.Query(&b.data, res)
+
+	/*e := b.driver.Executor()
+	return b.data.Execute(e, res) /**/
+
+	// Exec func
+}
+func (b *Builder) Delete() error {
+	return b.driver.Delete(&b.data)
+}
+func (b *Builder) Count() int {
+	return b.driver.Count(&b.data)
+}
+
+/*
+func (b *Builder) Count() int {
+	b.data.ResultKind = ResultCount
+	var count int
+
+	e := b.driver.Executor()
+
+	err := b.data.Execute(e, &count)
+	if err != nil {
+		return -1
+	}
+	return count
+}*/
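
For concreteness, a hedged sketch of how the stages above are meant to chain (Model and listExample are hypothetical; the query value is assumed to come from hqi.NewQuery, defined in query.go further down):

```go
package main

import "github.com/gohxs/hqi"

// Model is a placeholder struct for illustration only.
type Model struct {
	Name string
	Age  int
}

func listExample(q hqi.Query) ([]Model, error) {
	var res []Model
	// Find (match) -> Sort (second stage) -> Skip/Max (third stage) -> List (final stage)
	err := q.Find(hqi.M{"Name": "admin"}).
		Sort("-Age", "Name"). // "-" prefix sorts the field descending
		Skip(10).Max(5).      // window the result set
		List(&res)            // execute through the driver
	return res, err
}
```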

+ 19 - 0
backend/src/vendor/github.com/gohxs/hqi/doc/CHANGELOG.md

@@ -0,0 +1,19 @@
+
+
+03-02-2017
+----
+### Added
+- Implemented Delete in drivers
+
+### Changed
+- Changed Range to Limit
+- Package is now named hqi; changed the l (language) to an i (interface): (hexasoftware|High|hyper) query interface
+- Sort no longer requires two methods; a "-" prefix on a field means descending (could ">" also mean greater to smaller?):
+  ```go
+//Before:
+q.Find().SortAsc("field1","field2").SortDesc("field3").SortAsc("field4")
+// Now:
+q.Find().Sort("field1","field2","-field3","field4")
+```
+
+

+ 165 - 0
backend/src/vendor/github.com/gohxs/hqi/doc/IDEAS.md

@@ -0,0 +1,165 @@
+Sample ideas
+-----------------------
+
+Create driver capabilities to identify each driver; for example:
+mgo and slice support subDocuments/slices, SQL/Cassandra only flat tables
+
+
+
+Sample query:
+Considering 
+```go
+type Person struct {
+	Name string
+	Age int
+	HairColor string
+	Gender string
+}
+```
+I want females with dark hair aged above 20,
+and blonde hair if age >= 30
+
+Person 
+  (Gender == "f" && Age > 20 && Age < 30 && HairColor == "dark") ||
+  (Gender == "f" && Age >= 30 && HairColor == "blond")
+
+Could also be:
+Gender == "F" && ((Age >20 && Age < 30 && HAirColor == "dark") || (Age >= 30 && HairColor = "blond"))
+
+```go
+q.Find(PersonS{
+	Gender:"f",
+	HairColor:"dark",
+	Age: hqi.AND(hqi.GT(20),hqi.LT(30)),
+  },
+PersonS{
+	Gender:"f",
+	HairColor:"dark",
+	Age: hci.GT(30),
+}).List(...)
+```
+
+
+
+
+
+#### Simplifying matcher:
+
+
+
+```go
+
+//Filter
+q.Find(hqi.M{"Value>":10})
+q.Find(hqi.GT("Value",10))
+q.Find(hqi.M("Value",hqi.GT(10))
+q.Find(hqi.O("Value",">",10))
+
+//And operation
+
+//Finding a Person older than 20 and younger than 40
+
+q.Find( hqi.AND(hqi.F{"Age>":20},hqi.F{"Age<":40}) )
+q.Find( hqi.M{"Age":hqi.AND(hqi.GT(20),hqi.LT(40))})
+q.Find("Age > 20 AND Age < 40") // Needs a parser
+
+
+
+```
+
+
+
+
+
+
+
+#### Different way to perform queries:
+
+```go
+	q.Select(&res).      // This could be a struct{Name string}
+		Where(samples...).
+		Sort("field").
+		Limit(1).  // ...int
+		Exec() // We know that will execute here
+```
+this way we can change parts independently and output to different structs:
+```go
+type Model struct {
+	Name string
+	Value int
+}
+type ModelName struct {
+	Name string
+}
+
+qry := q.Select("Name")
+qry.Where({"Test":"1"}).Exec()
+
+qry.Where({"Test":"2"}).Exec()
+```
+
+#### QuerySampler
+
+rules for Find, if we have a model:
+
+```go
+type Person struct {
+	Name string
+	Age int
+}
+```
+
+we could create a "cloned" Sampler as:
+
+```go
+type PersonSampler struct {
+	Name interface{}
+	Age interface{}
+}
+
+q.FindS(PersonSampler{Age:0}).  // Usually 0 is nothing and won't match
+  List(&res)
+// Or also:
+q.FindS(PersonSampler{Age:gocql.Greater(10)}).
+  List(&res)
+
+```
+
+
+
+
+
+#### Internal operation (done)
+
+Samples could be stored in map[string]interface{} to define fields; executors  
+would read from these fields and do the operations.   
+Right now there is no way for a searcher to compare something to 0.   
+
+#### Executor vs Query
+
+Was experimenting with changing the executor back to one function to make the implementation less complex;
+as it is now, the user must create the driver with some functions and the executor with others.
+A single implementation in one struct would be better.
+
+
+#### Possibilities
+
+Imagine a common DB struct with several data tables
+
+and a Frontend that we would be able to execute standard SQL
+
+```go
+db := hdb.PrepareDB();
+db.Collection("User",mgodrv.Driver{d.DB("dbtest").C("user")})
+db.Collection("Things",slicedrv.Driver{[]MySlice{}})
+db.Collection("orders",sqldrv.Driver{sql.Open("postgres","dsn")})
+db.Collection("invoices",restdrv.Driver{"http://api.domain.tld"})
+
+rows,err := db.Query("select * from User")
+rows.Next()
+
+```
+
+
+
+

+ 16 - 0
backend/src/vendor/github.com/gohxs/hqi/driver.go

@@ -0,0 +1,16 @@
+package hqi
+
+//Driver creates executors and initializes what is needed
+type Driver interface {
+	Schema(obj interface{}) error
+	Insert(obj ...interface{}) error
+
+	// Querier or Finder
+	Query(qp *QueryParam, res interface{}) error
+	Delete(qp *QueryParam) error
+	Count(qp *QueryParam) int // can error too
+
+	// New: update all matches in query to obj
+	Update(qp *QueryParam, obj interface{}) error
+	//Executor() Executor // Creates executor per query
+}
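
As a reference point, a hedged skeleton of a driver satisfying this interface (mydrv and NullDriver are hypothetical names; every body is a placeholder no-op, whereas a real driver translates QueryParam into its backend's query language, as the drv/ packages below do):

```go
package mydrv

import "github.com/gohxs/hqi"

// NullDriver is an illustrative no-op implementation of hqi.Driver.
type NullDriver struct{}

func (d *NullDriver) Schema(obj interface{}) error                    { return nil }
func (d *NullDriver) Insert(obj ...interface{}) error                 { return nil }
func (d *NullDriver) Query(qp *hqi.QueryParam, res interface{}) error { return nil }
func (d *NullDriver) Delete(qp *hqi.QueryParam) error                 { return nil }
func (d *NullDriver) Count(qp *hqi.QueryParam) int                    { return 0 }
func (d *NullDriver) Update(qp *hqi.QueryParam, obj interface{}) error {
	return hqi.ErrNotImplemented
}

// Compile-time check that NullDriver satisfies the interface.
var _ hqi.Driver = (*NullDriver)(nil)
```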

+ 136 - 0
backend/src/vendor/github.com/gohxs/hqi/drv/driver_test.go

@@ -0,0 +1,136 @@
+package drv_test
+
+import (
+	"database/sql"
+
+	"os"
+	"testing"
+
+	"github.com/gohxs/hqi"
+	"github.com/gohxs/hqi/drv/mgodrv"
+	"github.com/gohxs/hqi/drv/slicedrv"
+	"github.com/gohxs/hqi/drv/sqldrv"
+	"github.com/gohxs/hqi/tester"
+	_ "github.com/lib/pq"
+	_ "github.com/mattn/go-sqlite3"
+	mgo "gopkg.in/mgo.v2-unstable"
+)
+
+// Run full tests on several drivers
+
+func TestMGO(t *testing.T) {
+	getDriver := func() hqi.Driver {
+		session, err := mgo.Dial("mongodb://localhost/mgo-test")
+		if err != nil {
+			t.Error("MGO connection fail")
+			return nil
+		}
+		coll := session.DB("mgo-test").C("hqitest")
+		coll.DropCollection()
+
+		return &mgodrv.Driver{coll}
+	}
+	tester.Test(t, getDriver)
+}
+func BenchmarkMGO(b *testing.B) {
+	getDriver := func() hqi.Driver {
+		session, err := mgo.Dial("mongodb://admin:1q2w3e@localhost/mgo-test")
+		if err != nil {
+			b.Error("MGO connection fail")
+			return nil
+		}
+		coll := session.DB("mgo-test").C("hqitest")
+		coll.DropCollection()
+
+		return &mgodrv.Driver{coll}
+	}
+	tester.Benchmark(b, getDriver)
+}
+
+func TestSLICE(t *testing.T) {
+	getDriver := func() hqi.Driver {
+		return &slicedrv.Driver{[]struct{}{}}
+	}
+	tester.Test(t, getDriver)
+}
+func BenchmarkSLICE(b *testing.B) {
+	getDriver := func() hqi.Driver {
+		return &slicedrv.Driver{[]struct{}{}}
+	}
+	tester.Benchmark(b, getDriver)
+
+}
+
+func TestSQLpg(t *testing.T) {
+	getDriver := func() hqi.Driver {
+		db, err := sql.Open("postgres", "user=admin dbname=hqitest sslmode=disable")
+		if err != nil {
+			t.Error("PG connection fail", err)
+			return nil
+		}
+		_, err = db.Exec("DROP TABLE IF EXISTS hqitest")
+		if err != nil {
+			t.Error("PG FAIL")
+			return nil
+		}
+
+		return &sqldrv.Driver{db, "hqitest"}
+	}
+	tester.Test(t, getDriver)
+}
+func BenchmarkSQLpg(b *testing.B) {
+	getDriver := func() hqi.Driver {
+		db, err := sql.Open("postgres", "user=admin dbname=hqitest sslmode=disable")
+		if err != nil {
+			b.Error("PG connection fail")
+			return nil
+		}
+		_, err = db.Exec("DROP TABLE IF EXISTS hqitest")
+		if err != nil {
+			b.Error("PG FAIL")
+			return nil
+		}
+
+		return &sqldrv.Driver{db, "hqitest"}
+	}
+	tester.Benchmark(b, getDriver)
+}
+
+func TestSQLlite(t *testing.T) {
+	getDriver := func() hqi.Driver {
+		db, err := sql.Open("sqlite3", "tmp.sqlite3")
+		if err != nil {
+			t.Error("Sqlite fail")
+			return nil
+		}
+		_, err = db.Exec("DROP TABLE IF EXISTS hqitest")
+		if err != nil {
+			t.Error("Sqlite fail")
+			return nil
+		}
+
+		return &sqldrv.Driver{db, "hqitest"}
+	}
+	tester.Test(t, getDriver)
+	//os.Remove("tmp.sqlite3")
+}
+
+func BenchmarkSQLlite(b *testing.B) {
+	getDriver := func() hqi.Driver {
+		db, err := sql.Open("sqlite3", "tmp.sqlite3")
+		if err != nil {
+			b.Error("Sqlite fail")
+			return nil
+		}
+		_, err = db.Exec("DROP TABLE IF EXISTS hqitest")
+		if err != nil {
+			b.Error("Sqlite fail")
+			return nil
+		}
+
+		return &sqldrv.Driver{db, "hqitest"}
+	}
+	tester.Benchmark(b, getDriver)
+
+	os.Remove("tmp.sqlite3")
+}

+ 137 - 0
backend/src/vendor/github.com/gohxs/hqi/drv/mgodrv/executor.go

@@ -0,0 +1,137 @@
+package mgodrv
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/gohxs/hqi"
+
+	mgo "gopkg.in/mgo.v2-unstable"
+	"gopkg.in/mgo.v2-unstable/bson"
+)
+
+type Executor struct {
+	driver *Driver
+	mq     *mgo.Query
+
+	// Query information
+	filter    bson.M
+	sort      []string
+	skip, max int
+}
+
+// Convert hqiMap to bsonMap
+func hm2bm(obj hqi.M) bson.M {
+	var ret = bson.M{}
+	for k, v := range obj { // Issue
+		switch xt := v.(type) {
+		case []hqi.M:
+			barr := []bson.M{} // Array to be placed in $and
+			for _, v := range xt {
+				bsub := hm2bm(v)
+				bret := bson.M{}
+				for k2, v2 := range bsub {
+					bret[strings.ToLower(k)+"."+k2] = v2
+				}
+				barr = append(barr, bret)
+			}
+			ret["$and"] = barr
+		case hqi.M: // subobject
+			bsub := hm2bm(xt)
+			for k2, v2 := range bsub {
+				ret[strings.ToLower(k)+"."+k2] = v2
+			}
+		case map[string]interface{}: // this should be on Query
+			bsub := hm2bm(hqi.M(xt))
+			for k2, v2 := range bsub {
+				ret[strings.ToLower(k)+"."+k2] = v2
+			}
+		default:
+			ret[strings.ToLower(k)] = v
+		}
+		// If v is a hqi.M we should sub this
+	}
+	return ret
+}
+
+func (e *Executor) Match(samples []hqi.M) {
+	if len(samples) == 0 {
+		// filter = nil
+		e.mq = e.driver.Coll.Find(nil)
+		return
+	}
+
+	bsonSamples := []bson.M{}
+	for _, smpl := range samples { // OR
+		bSmpl := hm2bm(smpl)
+
+		bsonSamples = append(bsonSamples, bSmpl)
+	}
+
+	//log.Println("Samples:", samples)
+	//log.Println("BsonSamples:", bsonSamples)
+	// If bsonSamples is 1 we pass directly in filter, else we  do a OR
+	if len(bsonSamples) == 1 {
+		e.filter = bsonSamples[0]
+		//XXX
+		//e.mq = e.driver.Coll.Find(bsonSamples[0])
+	} else {
+		e.filter = bson.M{"$or": bsonSamples}
+		//XXX
+		//e.mq = e.driver.Coll.Find(filter)
+	}
+	//Convert M to bson.M
+	//log.Println("Coll filter:", e.filter)
+}
+
+func (e *Executor) Sort(fields []hqi.Field) {
+	var sortfields []string
+	for _, sf := range fields {
+		if sf.Value == hqi.SortDesc {
+			sortfields = append(sortfields, fmt.Sprintf("-%s", strings.ToLower(sf.Name)))
+			continue
+		}
+		sortfields = append(sortfields, fmt.Sprintf("%s", strings.ToLower(sf.Name)))
+	}
+	if len(sortfields) > 0 {
+		e.sort = sortfields
+		//XXX e.mq = e.mq.Sort(sortfields...)
+	}
+}
+
+func (e *Executor) Range(skip int, max int) {
+	e.skip = skip
+	e.max = max
+	// Leave this to retriever
+	/*if skip > 0 {
+		e.mq = e.mq.Skip(skip)
+	}
+	if max > 0 {
+		e.mq = e.mq.Limit(max)
+	}*/
+}
+
+func (e *Executor) Retrieve(res interface{}) error {
+
+	mq := e.driver.Coll.Find(e.filter)
+
+	if len(e.sort) > 0 {
+		mq = mq.Sort(e.sort...)
+	}
+	if e.skip > 0 {
+		mq = mq.Skip(e.skip)
+	}
+	if e.max > 0 {
+		mq = mq.Limit(e.max)
+	}
+	return mq.All(res)
+	//err := e.mq.All(res)
+	//if err != nil {
+	// Set error
+	//}
+}
+
+func (e *Executor) Delete() error {
+	_, err := e.driver.Coll.RemoveAll(e.filter)
+	return err
+}
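
To illustrate what hm2bm's flattening does (keys are lowercased and nested maps become dotted paths; the expected output is read off the code above, not taken from mgo documentation):

```go
package main

import (
	"fmt"

	"github.com/gohxs/hqi"
)

func main() {
	sample := hqi.M{
		"Account": hqi.M{
			"Kind": "email",
			"Data": hqi.M{"email": "admin@domain.tld"},
		},
	}
	// Passed through Match -> hm2bm (unexported), this sample would
	// become roughly:
	//   bson.M{"account.kind": "email", "account.data.email": "admin@domain.tld"}
	fmt.Println(sample)
}
```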

+ 61 - 0
backend/src/vendor/github.com/gohxs/hqi/drv/mgodrv/mgodrv.go

@@ -0,0 +1,61 @@
+package mgodrv
+
+import (
+	"reflect"
+
+	"github.com/gohxs/hqi"
+
+	mgo "gopkg.in/mgo.v2-unstable"
+)
+
+type Driver struct {
+	Coll *mgo.Collection
+}
+
+func (d *Driver) Schema(obj interface{}) error {
+	return nil
+}
+
+func (d *Driver) Insert(objs ...interface{}) error {
+	dObj := []interface{}{}
+	for _, obj := range objs {
+		// Unfortunately slices must be flattened element by element
+		objVal := reflect.Indirect(reflect.ValueOf(obj))
+		if reflect.TypeOf(obj).Kind() == reflect.Slice {
+			for i := 0; i < objVal.Len(); i++ {
+				dObj = append(dObj, objVal.Index(i).Interface())
+			}
+			continue
+		}
+		dObj = append(dObj, objVal.Interface())
+	}
+	return d.Coll.Insert(dObj...)
+}
+
+func (d *Driver) Query(qp *hqi.QueryParam, res interface{}) error {
+	e := &Executor{driver: d}
+
+	e.Match(qp.Samples)
+	e.Sort(qp.Sort)
+	e.Range(qp.Skip, qp.Max)
+	// Propagate the Retrieve error instead of dropping it
+	return e.Retrieve(res)
+}
+
+/*func (d *Driver) Executor() hqi.Executor {
+	return &Executor{driver: d}
+}*/
+func (d *Driver) Count(qp *hqi.QueryParam) int {
+	return -1
+}
+
+func (d *Driver) Delete(qp *hqi.QueryParam) error {
+	e := &Executor{driver: d}
+	e.Match(qp.Samples)
+	return e.Delete()
+	//return hqi.ErrNotImplemented
+}
+
+func (d *Driver) Update(qp *hqi.QueryParam, obj interface{}) error {
+	return hqi.ErrNotImplemented
+}

+ 93 - 0
backend/src/vendor/github.com/gohxs/hqi/drv/slicedrv/executor.go

@@ -0,0 +1,93 @@
+package slicedrv
+
+import (
+	"reflect"
+	"sort"
+
+	"github.com/gohxs/hqi"
+)
+
+// Executor, the slice driver's query executor
+type Executor struct {
+	Coll interface{} // Collection
+
+	collVal reflect.Value
+	collTyp reflect.Type
+	resList reflect.Value
+}
+
+//Match matcher implementation
+func (e *Executor) Match(samples []hqi.M) {
+	if len(samples) == 0 {
+		e.resList = e.collVal
+		return
+	}
+	e.resList = reflect.MakeSlice(e.collTyp, 0, 1)
+	for i := 0; i < e.collVal.Len(); i++ {
+		vv := e.collVal.Index(i)
+		v := vv.Interface() // maybe slow :/
+
+		for _, sample := range samples {
+			//log.Println("Matching sample", sample)
+			if !sMatch(sample, v) {
+				continue
+			}
+			e.resList = reflect.Append(e.resList, vv)
+			break // append each matching item only once
+		}
+	}
+}
+
+//Sort implements Sorter
+func (e *Executor) Sort(fields []hqi.Field) {
+	if fields == nil {
+		return
+	}
+	if e.resList == e.collVal { // Prevents manipulation of the original list
+		// reflect.Copy from a slice onto itself is a no-op, so make a real copy
+		cp := reflect.MakeSlice(e.collTyp, e.collVal.Len(), e.collVal.Len())
+		reflect.Copy(cp, e.collVal)
+		e.resList = cp
+	}
+	// if smaller should be true
+	sort.Slice(e.resList.Interface(), func(i, j int) bool {
+		for _, sf := range fields {
+			sortType := sf.Value
+			f1 := e.resList.Index(i).FieldByName(sf.Name)
+			f2 := e.resList.Index(j).FieldByName(sf.Name)
+			if !f1.IsValid() || !f2.IsValid() {
+				return false
+			}
+			ret := typeDiff(f1.Interface(), f2.Interface())
+			if ret != 0 {
+				// if smaller it will be true, then check the sortType (if it is  Desc it will inverse)
+				return (ret < 0) == (sortType == hqi.SortAsc)
+			}
+		}
+		return false // just return something
+	})
+}
+
+//Range implements Ranger
+func (e *Executor) Range(skip, max int) {
+	mlen := e.resList.Len()
+	if skip > mlen {
+		e.resList = reflect.MakeSlice(e.collTyp, 0, 0) // empty
+		return
+	}
+	cMax := mlen - skip         // items available after skipping
+	if max != 0 && max < cMax { // requested max is smaller than what remains
+		cMax = max
+	}
+	e.resList = e.resList.Slice(skip, skip+cMax)
+
+}
+
+//Retrieve implements the retriever method for executor
+func (e *Executor) Retrieve(res interface{}) {
+	/*if kind == hqi.ResultCount {
+		reflect.ValueOf(res).Elem().Set(reflect.ValueOf(e.resList.Len()))
+	}*/
+	reflect.ValueOf(res).Elem().Set(e.resList)
+	// Maybe others
+}

+ 102 - 0
backend/src/vendor/github.com/gohxs/hqi/drv/slicedrv/slicedrv.go

@@ -0,0 +1,102 @@
+package slicedrv
+
+import (
+	"reflect"
+
+	"github.com/gohxs/hqi"
+)
+
+type Driver struct {
+	// Personal info
+	CollPtr interface{} // Should be a pointer to collection
+}
+
+func (d *Driver) Schema(obj interface{}) error {
+
+	nSliceTyp := reflect.SliceOf(reflect.TypeOf(obj))
+
+	//newSlice := reflect.MakeSlice(nSliceTyp, 0, 1)
+	newSlice := reflect.New(nSliceTyp)
+
+	d.CollPtr = newSlice.Interface() // Pointer
+
+	return nil
+}
+func (d *Driver) Insert(objs ...interface{}) error {
+
+	collVal := reflect.Indirect(reflect.ValueOf(d.CollPtr))
+	for _, obj := range objs {
+		objVal := reflect.Indirect(reflect.ValueOf(obj))
+		objTyp := reflect.TypeOf(obj)
+		if objTyp.Kind() == reflect.Slice {
+			for i := 0; i < objVal.Len(); i++ {
+				collVal = reflect.Append(collVal, objVal.Index(i))
+			}
+			continue
+		}
+		collVal = reflect.Append(collVal, objVal)
+	}
+	reflect.ValueOf(d.CollPtr).Elem().Set(collVal)
+	//d.collPtr = collVal.Interface()
+	//Add objs to thing
+	return nil
+
+}
+
+func (d *Driver) Query(qp *hqi.QueryParam, res interface{}) error {
+	ex := Executor{
+		Coll:    d.CollPtr,
+		collTyp: reflect.TypeOf(d.CollPtr).Elem(),
+		collVal: reflect.Indirect(reflect.ValueOf(d.CollPtr)),
+	}
+	ex.Match(qp.Samples)
+	ex.Sort(qp.Sort)
+	ex.Range(qp.Skip, qp.Max)
+	ex.Retrieve(res)
+
+	//TODO to be implemented
+	return nil
+}
+
+func (d *Driver) Count(qp *hqi.QueryParam) int {
+	return -1
+}
+
+func (d *Driver) Delete(qp *hqi.QueryParam) error {
+	// How will this work??
+
+	//collTyp := reflect.TypeOf(d.CollPtr).Elem()
+	collVal := reflect.Indirect(reflect.ValueOf(d.CollPtr))
+
+	var resList = collVal // Iterate from the end so removals don't shift pending indexes
+	for i := resList.Len() - 1; i >= 0; i-- {
+		vv := resList.Index(i)
+		v := vv.Interface() // maybe slow :/
+		for _, sample := range qp.Samples {
+			if !sMatch(sample, v) { // matches fall through and are removed below
+				continue
+			}
+			resList = reflect.AppendSlice(resList.Slice(0, i),
+				resList.Slice(i+1, resList.Len()))
+		}
+	}
+	reflect.ValueOf(d.CollPtr).Elem().Set(resList)
+
+	return nil
+}
+
+func (d *Driver) Update(p *hqi.QueryParam, obj interface{}) error {
+	return hqi.ErrNotImplemented
+}
+
+// Should be named differently?
+/*func (d *Driver) Executor() hqi.Executor {
+	// coll
+	ex := Executor{
+		Coll:    d.collPtr,
+		collTyp: reflect.TypeOf(d.collPtr).Elem(),
+		collVal: reflect.Indirect(reflect.ValueOf(d.collPtr)),
+	}
+
+	return &ex
+}*/

+ 107 - 0
backend/src/vendor/github.com/gohxs/hqi/drv/slicedrv/utils.go

@@ -0,0 +1,107 @@
+package slicedrv
+
+import (
+	"reflect"
+
+	"github.com/gohxs/hqi"
+)
+
+func isZero(x interface{}) bool {
+	return reflect.DeepEqual(x, reflect.Zero(reflect.TypeOf(x)).Interface())
+}
+
+//sMatch nonzero matcher: compares the sample's fields against the struct obj2
+// Extend to use map too,
+// but if it is a map we don't use 0
+func sMatch(sample hqi.M, obj2 interface{}) bool {
+	// we can cache obj1
+	val2 := reflect.ValueOf(obj2)
+	for field, value := range sample {
+		field2 := val2.FieldByName(field)
+
+		if !field2.IsValid() { //unmatched field
+			return false
+		}
+
+		//log.Println("Field :", field, "exists")
+		// Check for struct and do a submatch
+		// Deep match
+		if field2.Type().Kind() == reflect.Struct {
+			if tval, ok := value.(map[string]interface{}); ok {
+				return sMatch(tval, field2.Interface())
+			}
+			if tval, ok := value.(hqi.M); ok {
+				return sMatch(tval, field2.Interface())
+			}
+			//
+			//log.Println("It is a struct")
+			continue
+			// Same
+		}
+		//log.Println("Field2:", field2, field)
+		//Check zero too
+		if value != field2.Interface() {
+			return false
+		}
+	}
+	/*
+		//val1 := reflect.ValueOf(sample)
+		//typ1 := reflect.TypeOf(sample)
+		for i := 0; i < val1.NumField(); i++ {
+			//fieldName := typ1.Field(i).Name
+			field1 := val1.Field(i)
+			if isZero(field1.Interface()) {
+				continue
+			}
+			field2 := val2.Field(i)
+			if field1.Interface() != field2.Interface() {
+				return false
+			}
+		}*/
+	return true
+}
+
+// typeDiff compares two values of the same basic kind: <0 if v1 < v2, >0 if v1 > v2, 0 otherwise
+func typeDiff(v1, v2 interface{}) int64 {
+	// check same type?
+	// Default return 0
+	switch v1.(type) {
+	case int, int8, int16, int32, int64:
+		return reflect.ValueOf(v1).Int() - reflect.ValueOf(v2).Int()
+	case uint, uint8, uint16, uint32, uint64:
+		i1, i2 := reflect.ValueOf(v1).Uint(), reflect.ValueOf(v2).Uint()
+		if i1 > i2 {
+			return 1
+		}
+		if i1 < i2 {
+			return -1
+		}
+	case float32:
+		f1, f2 := v1.(float32), v2.(float32)
+		if f1 > f2 {
+			return 1
+		}
+		if f1 < f2 {
+			return -1
+		}
+	case float64:
+		f1, f2 := v1.(float64), v2.(float64)
+		if f1 > f2 {
+			return 1
+		}
+		if f1 < f2 {
+			return -1
+		}
+
+	case string:
+		s1, s2 := v1.(string), v2.(string)
+		if s1 > s2 {
+			return 1
+		}
+		if s1 < s2 {
+			return -1
+		}
+	}
+
+	return 0
+}
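
A small in-package sketch of how sMatch behaves (exampleUser, matchExample, and the values are made up for this illustration):

```go
package slicedrv

import "github.com/gohxs/hqi"

type exampleUser struct {
	Name string
	Age  int
}

// matchExample shows that only fields present in the sample are compared;
// fields missing from the sample (Age here) are ignored.
func matchExample() bool {
	u := exampleUser{Name: "ana", Age: 30}
	return sMatch(hqi.M{"Name": "ana"}, u) // true
}
```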

+ 120 - 0
backend/src/vendor/github.com/gohxs/hqi/drv/sqldrv/driver.go

@@ -0,0 +1,120 @@
+package sqldrv
+
+import (
+	"bytes"
+	"database/sql"
+	"fmt"
+	"reflect"
+
+	"github.com/gohxs/hqi"
+)
+
+type Driver struct {
+	DB        *sql.DB
+	TableName string
+}
+
+var (
+	TypeMap = map[string]string{
+		"int": "integer", "int8": "integer", "int16": "integer", "int32": "integer", "int64": "integer",
+		"string": "text",
+	}
+)
+
+func (d *Driver) Schema(obj interface{}) error {
+	var qry bytes.Buffer
+	qry.WriteString(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (", d.TableName))
+	elemTyp := reflect.TypeOf(obj)
+	for i := 0; i < elemTyp.NumField(); i++ {
+		if i != 0 {
+			qry.WriteString(", ")
+		}
+		qry.WriteString(fmt.Sprintf("%s", elemTyp.Field(i).Name))
+		sqlType := TypeMap[elemTyp.Field(i).Type.Name()]
+		qry.WriteString(fmt.Sprintf(" %s", sqlType))
+	}
+	qry.WriteString(");")
+
+	//log.Printf("CREATE stmt:\n%s\n", qry.String())
+	_, err := d.DB.Exec(qry.String())
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+func (d *Driver) Insert(objs ...interface{}) error {
+
+	var qry bytes.Buffer
+	var qryParam = []interface{}{}
+	var objCount = 0
+
+	qry.WriteString("INSERT INTO ")
+	qry.WriteString(d.TableName)
+	qry.WriteString(" VALUES\n")
+	getObj := func(elemVal reflect.Value) {
+		if objCount != 0 {
+			qry.WriteString(",\n")
+		}
+		qry.WriteString("(")
+		//elemVal := reflect.ValueOf(obj)
+		for i := 0; i < elemVal.NumField(); i++ {
+			if i != 0 {
+				qry.WriteString(", ")
+			}
+			qryParam = append(qryParam, elemVal.Field(i).Interface())
+			qry.WriteString(fmt.Sprintf("$%d", len(qryParam)))
+		}
+		qry.WriteString(")")
+		// Execute here
+		objCount++
+	}
+
+	// Create Insert stmt
+	for _, obj := range objs {
+		objTyp := reflect.TypeOf(obj)
+		objVal := reflect.Indirect(reflect.ValueOf(obj))
+		if objTyp.Kind() == reflect.Slice {
+			for i := 0; i < objVal.Len(); i++ {
+				getObj(objVal.Index(i))
+			}
+			continue
+		}
+		getObj(objVal)
+	}
+	qry.WriteString(";")
+	//log.Printf("INSERT:\n%s %v\n", qry.String(), qryParam)
+	//log.Println("  Param:", qryParam)
+	_, err := d.DB.Exec(qry.String(), qryParam...)
+	if err != nil {
+		return fmt.Errorf("%s\n%s\n%v", err, qry.String(), qryParam)
+	}
+	return nil
+}
+
+func (d *Driver) Query(qp *hqi.QueryParam, res interface{}) error {
+	e := Executor{driver: d}
+	e.Where(qp.Samples)
+	e.Sort(qp.Sort)
+	e.Limit(qp.Skip, qp.Max)
+
+	return e.Retrieve(res)
+}
+
+func (d *Driver) Count(qp *hqi.QueryParam) int {
+	return -1
+}
+func (d *Driver) Delete(qp *hqi.QueryParam) error {
+	e := Executor{driver: d}
+	e.Where(qp.Samples)
+	return e.Delete()
+}
+func (d *Driver) Update(qp *hqi.QueryParam, obj interface{}) error {
+	return hqi.ErrNotImplemented
+}
+
+/*
+func (d *Driver) Executor() hqi.Executor {
+	ex := Executor{driver: d}
+	return &ex
+}*/

+ 185 - 0
backend/src/vendor/github.com/gohxs/hqi/drv/sqldrv/executor.go

@@ -0,0 +1,185 @@
+// Package sqldrv tests a different, SQL-backed implementation
+package sqldrv
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/gohxs/hqi"
+)
+
+//Executor sqlite executor
+type Executor struct {
+	driver      *Driver
+	whereClause string
+	orderClause string
+	limitClause string
+	fieldVar    []interface{}
+}
+
+//Where builds the WHERE clause from the samples
+func (e *Executor) Where(samples []hqi.M) {
+	var qry bytes.Buffer
+	if len(samples) == 0 {
+		return
+	}
+	qry.WriteString("WHERE ")
+	for sampleI, sample := range samples {
+		if sampleI != 0 {
+			qry.WriteString(" OR ")
+		}
+		qry.WriteString("(")
+		c := 0
+		for field, value := range sample {
+			if c != 0 {
+				qry.WriteString(" AND ")
+			}
+			e.fieldVar = append(e.fieldVar, value)
+
+			var op = "="
+			fname := field
+			// we can check a suffix here:
+			suffix := field[len(field)-1]
+			if suffix == '>' { // Check for others
+				op = string(suffix)
+				fname = strings.TrimSpace(field[:len(field)-1])
+			}
+
+			qry.WriteString(fmt.Sprintf("%s %s $%d", fname, op, len(e.fieldVar)))
+			c++
+		}
+		qry.WriteString(")")
+	}
+	//log.Println("Where:", qry.String())
+
+	e.whereClause = qry.String()
+}
+
+//Sort sorter implementation
+func (e *Executor) Sort(fields []hqi.Field) {
+	var qry bytes.Buffer
+	// Sorter
+	for i, sort := range fields {
+		if i == 0 {
+			qry.WriteString("ORDER BY")
+		} else {
+			qry.WriteString(",")
+		}
+		if sort.Value == hqi.SortAsc {
+			qry.WriteString(fmt.Sprintf(" %s ASC", sort.Name))
+		} else {
+			qry.WriteString(fmt.Sprintf(" %s DESC", sort.Name))
+		}
+	}
+	e.orderClause = qry.String()
+}
+
+//Limit builds the LIMIT/OFFSET clause
+func (e *Executor) Limit(skip, max int) {
+	var qry bytes.Buffer
+
+	if max > 0 {
+		qry.WriteString(fmt.Sprintf("LIMIT %d", max))
+	}
+	if skip > 0 {
+		qry.WriteString(fmt.Sprintf(" OFFSET %d", skip))
+	}
+
+	e.limitClause = qry.String()
+}
+
+// Retrieve implementation
+func (e *Executor) Retrieve(res interface{}) error {
+	// ignore kind?
+
+	// Build Select
+	var qry bytes.Buffer
+	qry.WriteString(fmt.Sprintf("SELECT"))
+	elemTyp := reflect.TypeOf(res).Elem().Elem()
+	for i := 0; i < elemTyp.NumField(); i++ {
+		if i != 0 {
+			qry.WriteString(",")
+		}
+		qry.WriteString(fmt.Sprintf(" %s", elemTyp.Field(i).Name))
+	}
+	qry.WriteString(fmt.Sprintf(" FROM %s", e.driver.TableName))
+
+	if e.whereClause != "" {
+		qry.WriteString("\n")
+		qry.WriteString(e.whereClause)
+	}
+	if e.orderClause != "" {
+		qry.WriteString("\n")
+		qry.WriteString(e.orderClause)
+	}
+	if e.limitClause != "" {
+		qry.WriteString("\n")
+		qry.WriteString(e.limitClause)
+	}
+
+	// Retriever
+	qryStr := qry.String()
+	//log.Printf("SQL:\n%s\n", qryStr)
+	qrows, err := e.driver.DB.Query(qryStr, e.fieldVar...)
+	if err != nil {
+		return err
+	}
+	//Build Fields to scan
+	// Pointer of struct
+	resType := reflect.TypeOf(res).Elem()
+	resList := reflect.MakeSlice(resType, 0, 1)
+	for qrows.Next() {
+		var fields []interface{}
+		sliceElem := reflect.New(elemTyp) // new Struct
+		for i := 0; i < elemTyp.NumField(); i++ {
+			// Create new Field
+			fieldPtr := sliceElem.Elem().Field(i).Addr()
+			fields = append(fields, fieldPtr.Interface())
+		}
+		if err := qrows.Scan(fields...); err != nil {
+			return err
+		}
+		//Copy struct content
+		resList = reflect.Append(resList, sliceElem.Elem())
+	}
+	reflect.ValueOf(res).Elem().Set(resList)
+
+	return nil
+}
+
+func (e *Executor) Delete() error {
+	var qry bytes.Buffer
+
+	// We have where clause
+	fmt.Fprintf(&qry, "DELETE FROM %s", e.driver.TableName)
+	//qry.WriteString("DELETE FROM " + e.driver.TableName)
+	if e.whereClause != "" {
+		qry.WriteString("\n")
+		qry.WriteString(e.whereClause)
+	}
+
+	qryStr := qry.String()
+
+	//log.Println("Query:", qryStr, e.fieldVar)
+
+	_, err := e.driver.DB.Exec(qryStr, e.fieldVar...)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func isZero(x interface{}) bool {
+	return reflect.DeepEqual(x, reflect.Zero(reflect.TypeOf(x)).Interface())
+}
+
+/*func CreateExecutor(db *sql.DB, tableName string) hqi.ExecFunc {
+	return func(qd *hqi.BuilderData, res interface{}) error {
+		e := &Executor{db: db, tableName: tableName}
+
+		return qd.Execute(e, res)
+	}
+}*/
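
The Where method above treats a trailing '>' on a field name as the comparison operator; a hedged in-package sketch of the clause it accumulates (the SQL shown is reconstructed from the code, not captured from a run):

```go
package sqldrv

import "github.com/gohxs/hqi"

// whereExample shows, from inside the package, what Where builds up.
func whereExample(d *Driver) {
	e := Executor{driver: d}
	e.Where([]hqi.M{
		{"Name": "aaa"}, // samples are OR'ed together
		{"Age>": 20},    // a trailing '>' on the key becomes the operator
	})
	// e.whereClause is now roughly:
	//   WHERE (Name = $1) OR (Age > $2)
	// and e.fieldVar holds []interface{}{"aaa", 20}
	_ = e
}
```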

BIN
backend/src/vendor/github.com/gohxs/hqi/drv/tmp.sqlite3


+ 31 - 0
backend/src/vendor/github.com/gohxs/hqi/executor.go.bak

@@ -0,0 +1,31 @@
+package hql
+
+// Unnecessary Executor
+
+// Collector
+// Should be called executor
+type Executor interface {
+	Matcher
+	Sorter
+	Ranger
+	Retriever
+}
+
+// Initialize the schema
+type Schemer interface {
+	Schema(sample interface{})
+}
+
+type Matcher interface {
+	Match(samples []M)
+}
+type Sorter interface {
+	Sort([]Field)
+}
+type Ranger interface {
+	Range(skip, max int)
+}
+
+type Retriever interface {
+	Retrieve(res interface{})
+}

+ 141 - 0
backend/src/vendor/github.com/gohxs/hqi/query.go

@@ -0,0 +1,141 @@
+package hqi
+
+import (
+	"encoding/json"
+	"errors"
+	"reflect"
+	"strings"
+)
+
+var (
+	ErrNotImplemented = errors.New("Not implemented")
+)
+
+const (
+
+	//ResultCount get the number of entries instead of Content
+	ResultCount = iota
+	//ResultOne get one result
+	ResultOne
+	//ResultList get a list of results (even one)
+	ResultList
+	//SortAsc sort smaller to bigger
+	SortAsc = iota
+	//SortDesc sort bigger to smaller
+	SortDesc
+)
+
+//Field common field type
+type (
+	Field struct {
+		Name  string
+		Value interface{}
+	}
+	M     map[string]interface{}
+	Query struct {
+		driver Driver
+	}
+)
+
+func NewQuery(driver Driver) Query {
+	if driver == nil {
+		panic("Driver is nil")
+	}
+	return Query{driver}
+}
+
+//Find initiates a query builder
+func (q *Query) Find(samples ...interface{}) SecondStage {
+	// Convert samples to hql.M
+	samplesMap := []M{}
+	for _, sample := range samples {
+		var cur M
+		switch t := sample.(type) {
+		case M:
+			cur = t
+		case map[string]interface{}:
+			cur = t
+		case string:
+			cur = M{}
+			json.Unmarshal([]byte(t), &cur)
+		default:
+			//TODO: do own conversion
+			cur = s2m(sample)
+
+			/*cur = M{}
+
+			data, _ := json.Marshal(sample)
+			json.Unmarshal(data, &cur)*/
+		}
+		samplesMap = append(samplesMap, cur)
+	}
+
+	//Convert samples to map[string]interface{}
+
+	b := Builder{data: QueryParam{Samples: samplesMap}, driver: q.driver}
+	return &b
+}
+
+func (q *Query) Schema(sample interface{}) error {
+	return q.driver.Schema(sample)
+}
+func (q *Query) Insert(objs ...interface{}) error {
+	return q.driver.Insert(objs...)
+}
+
+// Convert struct to M
+func s2m(obj interface{}) M {
+	var ret = M{}
+	//objTyp := reflect.TypeOf(obj)
+	objVal := reflect.ValueOf(obj)
+	for i := 0; i < objVal.Type().NumField(); i++ {
+		fieldTyp := objVal.Type().Field(i)
+		value := objVal.Field(i)
+		valI := value.Interface()
+
+		fName := fieldTyp.Name
+		omitEmpty := false
+
+		// PARSE struct TAGS
+		tagStr, ok := fieldTyp.Tag.Lookup("hqi")
+		if ok {
+			opts := strings.Split(tagStr, ",")
+			if opts[0] != "" {
+				fName = opts[0]
+			}
+			if len(opts) > 1 && opts[1] == "omitempty" {
+				omitEmpty = true
+			}
+		}
+
+		// Check nil or zero if omitEmpty
+		if valI == nil || (isZero(valI) && omitEmpty) {
+			continue
+		}
+		valKind := reflect.TypeOf(valI).Kind()
+		switch valKind {
+		case reflect.Slice:
+			var s = []M{} // new slice
+			for si := 0; si < value.Len(); si++ {
+				s = append(s, s2m(value.Index(si).Interface()))
+			}
+			ret[fName] = s
+		case reflect.Map:
+			var m = M{}
+			for _, k := range value.MapKeys() {
+				m[k.String()] = value.MapIndex(k).Interface()
+			}
+			ret[fName] = m
+		case reflect.Struct:
+			ret[fName] = s2m(valI) // recursive
+		default:
+			ret[fName] = valI
+
+		}
+	}
+	return ret
+}
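+
+// Illustrative example of the tag handling above (the Login type here is
+// hypothetical):
+//
+//	type Login struct {
+//		User string `hqi:"user"`
+//		Note string `hqi:"note,omitempty"`
+//	}
+//
+//	s2m(Login{User: "admin"}) // => M{"user": "admin"}; "note" is dropped as empty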
+
+func isZero(x interface{}) bool {
+	return reflect.DeepEqual(x, reflect.Zero(reflect.TypeOf(x)).Interface())
+}

+ 106 - 0
backend/src/vendor/github.com/gohxs/hqi/samples/main.go

@@ -0,0 +1,106 @@
+package main
+
+import (
+	"database/sql"
+	"log"
+
+	mgo "gopkg.in/mgo.v2-unstable"
+
+	"github.com/gohxs/hqi"
+	"github.com/gohxs/hqi/drv/mgodrv"
+	"github.com/gohxs/hqi/drv/slicedrv"
+	"github.com/gohxs/hqi/drv/sqldrv"
+	_ "github.com/lib/pq"
+	_ "github.com/mattn/go-sqlite3"
+)
+
+/*type Details struct {
+	Key   string
+	Value string
+}*/
+type UserAccount struct {
+	Kind string
+	Data map[string]string
+}
+type User struct {
+	Name        string
+	Description string
+	Account     []UserAccount
+}
+
+var coll = []User{}
+
+func main() {
+
+	// Init driver
+	drv := driverMGO()
+	//drv := driverSLICE()
+	q := hqi.NewQuery(drv)
+
+	q.Insert(&User{"admin", "Administration account", []UserAccount{{"email", map[string]string{"email": "admin@domain.tld", "pwd": "1q2w3e"}}}})
+	q.Insert(&User{"user", "Regular user", []UserAccount{{"email", map[string]string{"email": "user@domain.tld", "pwd": "1q2w3e"}}}})
+
+	// Lets say user login with email and passwd
+
+	var email = "admin@domain.tld"
+	var pwd = "1q2w3e"
+
+	var res []User
+
+	q.Find(hqi.M{
+		"Account": hqi.M{
+			"Kind": "email",
+			"Data": hqi.M{
+				"email": email,
+				"pwd":   pwd,
+			},
+		},
+	}).List(&res)
+	if len(res) > 0 {
+		log.Println("Result:", res)
+	} else {
+		log.Println("Invalid login")
+	}
+}
+
+var (
+	postgreDSN = "user=admin dbname=hqitest sslmode=disable"
+	sqliteDSN  = "tmp.sqlite3"
+)
+
+func driverSQL(sqldriver string, dsn string) *sqldrv.Driver {
+
+	db, err := sql.Open(sqldriver, dsn)
+	if err != nil {
+		panic(err) // fail fast; a typed-nil *Driver would slip past NewQuery's nil check
+	}
+	_, err = db.Exec("DROP TABLE IF EXISTS hqitest")
+	if err != nil {
+		panic(err)
+	}
+	// Create the schema for the test table
+	driver := &sqldrv.Driver{db, "hqitest"}
+
+	q := hqi.NewQuery(driver)
+	err = q.Schema(User{})
+	if err != nil {
+		panic(err)
+	}
+	return driver
+}
+
+func driverSLICE() *slicedrv.Driver {
+	return &slicedrv.Driver{&coll}
+}
+func driverMGO() *mgodrv.Driver {
+	session, err := mgo.Dial("mongodb://localhost/mgo-test")
+	if err != nil {
+		panic(err) // fail fast; a typed-nil *Driver would slip past NewQuery's nil check
+	}
+	coll := session.DB("mgo-test").C("hqitest")
+	coll.DropCollection()
+
+	return &mgodrv.Driver{coll}
+}

+ 144 - 0
backend/src/vendor/github.com/gohxs/hqi/tester/tester.go

@@ -0,0 +1,144 @@
+package tester
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/gohxs/hqi"
+)
+
+// Model is a sample type for exercising driver implementations; it can be
+// reused by the test packages of individual drivers.
+type Model struct {
+	Name  string
+	Value int
+}
+
+var (
+	Data = []Model{
+		{"aaa", 1},
+		{"aaa", 2},
+		{"bbb", 3},
+		{"bbb", 4},
+		{"ccc", 5},
+		{"ccc", 6},
+	}
+)
+
+func PrepareHQI(t Testing, dc func() hqi.Driver) hqi.Query {
+	q := hqi.NewQuery(dc())
+	//e := &ErrChecker{t}
+
+	var err error
+	err = q.Schema(Model{})
+	if err != nil {
+		panic(err)
+	}
+	err = q.Insert(Data)
+	if err != nil {
+		panic(err)
+	}
+
+	err = q.Insert(Data)
+	if err != nil {
+		panic(err)
+	}
+	// Needs drop delete All
+	//e.MCheckEQ("Creating schema", q.Schema(Model{}), nil)
+	// Double data
+	//	e.MCheckEQ(fmt.Sprint("Inserting data ", Data), q.Insert(Data), nil)
+	//	e.MCheckEQ(fmt.Sprint("Inserting data AGAIN", Data), q.Insert(Data), nil)
+
+	return q
+}
+
+//Test runs the shared test suite against the driver returned by dc
+func Test(t *testing.T, dc func() hqi.Driver) {
+	{
+		q := hqi.NewQuery(dc())
+		e := &ErrChecker{t}
+		// Needs drop delete All
+		e.MCheckEQ("Creating schema", q.Schema(Model{}), nil)
+		// Double data
+		e.MCheckEQ(fmt.Sprint("Inserting data ", Data), q.Insert(Data), nil)
+		e.MCheckEQ(fmt.Sprint("Inserting data AGAIN", Data), q.Insert(Data), nil)
+	}
+
+	// Initialize data
+	t.Run("Match", func(t *testing.T) {
+		q := PrepareHQI(t, dc)
+		var res []Model
+		e := &ErrChecker{t}
+		q.Find(`{"Name":"aaa"}`).List(&res)
+		e.CheckEQ(fmt.Sprint(res), "[{aaa 1} {aaa 2} {aaa 1} {aaa 2}]")
+	})
+
+	t.Run("Skip&Max", func(t *testing.T) {
+		q := PrepareHQI(t, dc)
+		var res []Model
+		e := &ErrChecker{t}
+		q.Find().Skip(4).Max(2).List(&res)
+		e.CheckEQ(fmt.Sprint(res), "[{ccc 5} {ccc 6}]")
+	})
+
+	t.Run("Sort(name)&Max", func(t *testing.T) {
+		q := PrepareHQI(t, dc)
+		var res []Model
+		e := &ErrChecker{t}
+		q.Find().Sort("-Name", "Value").Max(2).List(&res)
+		e.CheckEQ(fmt.Sprint(res), "[{ccc 5} {ccc 5}]")
+	})
+
+	t.Run("Sort(-Name,-Value)&Max", func(t *testing.T) {
+		q := PrepareHQI(t, dc)
+		var res []Model
+		e := &ErrChecker{t}
+		q.Find().Sort("-Name", "-Value").Max(2).List(&res)
+		e.CheckEQ(fmt.Sprint(res), "[{ccc 6} {ccc 6}]")
+	})
+	t.Run("Remove", func(t *testing.T) {
+		q := PrepareHQI(t, dc)
+		var res []Model
+		e := &ErrChecker{t}
+		var err error
+
+		err = q.Find(hqi.M{"Name": "ccc", "Value": 6}).Delete()
+		e.CheckEQ(err, nil)
+
+		q.Find(hqi.M{"Name": "ccc"}).List(&res)
+		e.CheckEQ(fmt.Sprint(res), "[{ccc 5} {ccc 5}]")
+	})
+	t.Run("RemoveOR", func(t *testing.T) {
+		q := PrepareHQI(t, dc)
+		var res []Model
+		e := &ErrChecker{t}
+		var err error
+
+		// Remove all bbb 6 or all aaa
+		err = q.Find(hqi.M{"Name": "bbb", "Value": 4}, hqi.M{"Name": "aaa"}, hqi.M{"Name": "ccc", "Value": 6}).Delete()
+		e.CheckEQ(err, nil)
+
+		q.Find().Sort("Name", "Value").List(&res)
+		e.CheckEQ(fmt.Sprint(res), "[{bbb 3} {bbb 3} {ccc 5} {ccc 5}]")
+
+	})
+}
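+
+// Illustrative use from a driver's test package (slicedrv is one of this
+// repository's drivers; the exact wiring is an assumption):
+//
+//	func TestDriver(t *testing.T) {
+//		tester.Test(t, func() hqi.Driver {
+//			coll := []tester.Model{}
+//			return &slicedrv.Driver{&coll}
+//		})
+//	}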
+
+// Benchmark
+func Benchmark(b *testing.B, getDriver func() hqi.Driver) {
+	q := PrepareHQI(b, getDriver)
+	b.Run("Match", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			var res []Model
+			q.Find(hqi.M{"Name": "aaa"}).
+				List(&res)
+		}
+	})
+	b.Run("Insert&Delete", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			q.Insert(Model{Name: "zzz", Value: 99})
+			q.Find(hqi.M{"Name": "zzz"}).Delete()
+
+		}
+	})
+
+}

+ 52 - 0
backend/src/vendor/github.com/gohxs/hqi/tester/testutils.go

@@ -0,0 +1,52 @@
+package tester
+
+import (
+	"fmt"
+	"testing"
+)
+
+// We want something that have Error and Log merely
+type Testing interface {
+	Error(...interface{})
+	Log(...interface{})
+}
+
+// ErrChecker Utils for testing
+type ErrChecker struct {
+	//t *testing.T
+	t Testing
+}
+
+//CheckEQ checks whether two values are equal
+func (ec *ErrChecker) CheckEQ(v1 interface{}, v2 interface{}) {
+	if v1 != v2 {
+		smsg := fmt.Sprint("\n\033[01;31mwants : ", v2, "\033[0m\n\033[01;31mgot   : ", v1, "\033[0m")
+		ec.t.Error("\033[0;31mFAIL\033[0m", smsg)
+		//ec.t.Error("\033[01;31m\nwants : ", v2, "\ngot   : ", v1, "\033[0m")
+		return
+	}
+	// Only if verbose
+	if testing.Verbose() {
+		//ec.t.Log("\033[01;30m\nwants : ", v2, "\ngot   : ", v1, "\033[0m")
+		smsg := fmt.Sprint("\n\033[01;30mwants : ", v2, "\033[0m\n\033[01;30mgot   : ", v1, "\033[0m")
+		ec.t.Log("\033[00;32mPASS\033[0m", smsg)
+	}
+}
+
+//MCheckEQ checks whether two values are equal, printing msg alongside the result
+func (ec *ErrChecker) MCheckEQ(msg string, v1 interface{}, v2 interface{}) {
+	if v1 != v2 {
+		smsg := fmt.Sprint("\n\033[01;31mwants : ", v2, "\033[0m\n\033[01;31mgot   : ", v1, "\033[0m")
+		ec.t.Error("\033[0;31mFAIL\033[01;31m", msg, "\033[0m", smsg)
+		//ec.t.Error("\033[01;31m\nwants : ", v2, "\ngot   : ", v1, "\033[0m")
+		return
+	}
+	// Only if verbose
+	if testing.Verbose() {
+		//ec.t.Log("\033[01;30m\nwants : ", v2, "\ngot   : ", v1, "\033[0m")
+		smsg := fmt.Sprint("\n\033[01;30mwants : ", v2, "\033[0m\n\033[01;30mgot   : ", v1, "\033[0m")
+		ec.t.Log("\033[00;32mPASS\033[01;34m", msg, "\033[0m", smsg)
+	}
+}

+ 9 - 0
backend/src/vendor/github.com/google/uuid/.travis.yml

@@ -0,0 +1,9 @@
+language: go
+
+go:
+  - 1.4.3
+  - 1.5.3
+  - tip
+
+script:
+  - go test -v ./...

+ 10 - 0
backend/src/vendor/github.com/google/uuid/CONTRIBUTING.md

@@ -0,0 +1,10 @@
+# How to contribute
+
+We definitely welcome patches and contributions to this project!
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+You may have already signed it for other Google projects.

+ 9 - 0
backend/src/vendor/github.com/google/uuid/CONTRIBUTORS

@@ -0,0 +1,9 @@
+Paul Borman <borman@google.com>
+bmatsuo
+shawnps
+theory
+jboverfelt
+dsymonds
+cd1
+wallclockbuilder
+dansouza

+ 27 - 0
backend/src/vendor/github.com/google/uuid/LICENSE

@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 23 - 0
backend/src/vendor/github.com/google/uuid/README.md

@@ -0,0 +1,23 @@
+**This package is currently in development and the API may not be stable.**
+
+The API will become stable with v1.
+
+# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+The uuid package generates and inspects UUIDs based on
+[RFC 4122](http://tools.ietf.org/html/rfc4122)
+and DCE 1.1: Authentication and Security Services. 
+
+This package is based on the github.com/pborman/uuid package (previously named
+code.google.com/p/go-uuid).  It differs from these earlier packages in that
+a UUID is a 16 byte array rather than a byte slice.  One loss due to this
+change is the ability to represent an invalid UUID (vs a NIL UUID).
+
+###### Install
+`go get github.com/google/uuid`
+
+###### Documentation 
+[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+
+Full `go doc` style documentation for the package can be viewed online without
+installing this package by using the GoDoc site here: 
+http://godoc.org/github.com/google/uuid
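+
+###### Example
+A minimal sketch of typical use:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/google/uuid"
+)
+
+func main() {
+	id := uuid.New()         // random (Version 4) UUID; panics only if crypto/rand fails
+	fmt.Println(id.String()) // canonical xxxxxxxx-xxxx-4xxx-xxxx-xxxxxxxxxxxx form
+	parsed, err := uuid.Parse(id.String())
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(parsed == id) // true: a UUID is a 16 byte array and compares directly
+}
+```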

+ 80 - 0
backend/src/vendor/github.com/google/uuid/dce.go

@@ -0,0 +1,80 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+	Person = Domain(0)
+	Group  = Domain(1)
+	Org    = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group domain.  The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
+	uuid, err := NewUUID()
+	if err == nil {
+		uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+		uuid[9] = byte(domain)
+		binary.BigEndian.PutUint32(uuid[0:], id)
+	}
+	return uuid, err
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+//  NewDCEPerson(Person, uint32(os.Getuid()))
+func NewDCEPerson() (UUID, error) {
+	return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+//  NewDCEGroup(Group, uint32(os.Getgid()))
+func NewDCEGroup() (UUID, error) {
+	return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID.  Domains are only defined
+// for Version 2 UUIDs.
+func (uuid UUID) Domain() Domain {
+	return Domain(uuid[9])
+}
+
+// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
+// UUIDs.
+func (uuid UUID) ID() uint32 {
+	return binary.BigEndian.Uint32(uuid[0:4])
+}
+
+func (d Domain) String() string {
+	switch d {
+	case Person:
+		return "Person"
+	case Group:
+		return "Group"
+	case Org:
+		return "Org"
+	}
+	return fmt.Sprintf("Domain%d", int(d))
+}

+ 12 - 0
backend/src/vendor/github.com/google/uuid/doc.go

@@ -0,0 +1,12 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uuid generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
+// Services.
+//
+// A UUID is a 16 byte (128 bit) array.  UUIDs may be used as keys to
+// maps or compared directly.
+package uuid

+ 53 - 0
backend/src/vendor/github.com/google/uuid/hash.go

@@ -0,0 +1,53 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"hash"
+)
+
+// Well known namespace IDs and UUIDs
+var (
+	NameSpaceDNS  = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceURL  = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+	Nil           UUID // empty UUID, all zeros
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h.  The hash should be at least 16 bytes in length.  The
+// first 16 bytes of the hash are used to form the UUID.  The version of the
+// UUID will be the lower 4 bits of version.  NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+	h.Reset()
+	h.Write(space[:])
+	h.Write(data)
+	s := h.Sum(nil)
+	var uuid UUID
+	copy(uuid[:], s)
+	uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+	return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data.  It is the same as calling:
+//
+//  NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+	return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data.  It is the same as calling:
+//
+//  NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+	return NewHash(sha1.New(), space, data, 5)
+}

+ 62 - 0
backend/src/vendor/github.com/google/uuid/json_test.go

@@ -0,0 +1,62 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/json"
+	"reflect"
+	"testing"
+)
+
+var testUUID = Must(Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479"))
+
+func TestJSON(t *testing.T) {
+	type S struct {
+		ID1 UUID
+		ID2 UUID
+	}
+	s1 := S{ID1: testUUID}
+	data, err := json.Marshal(&s1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var s2 S
+	if err := json.Unmarshal(data, &s2); err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(&s1, &s2) {
+		t.Errorf("got %#v, want %#v", s2, s1)
+	}
+}
+
+func BenchmarkUUID_MarshalJSON(b *testing.B) {
+	x := &struct {
+		UUID UUID `json:"uuid"`
+	}{}
+	var err error
+	x.UUID, err = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
+	if err != nil {
+		b.Fatal(err)
+	}
+	for i := 0; i < b.N; i++ {
+		js, err := json.Marshal(x)
+		if err != nil {
+			b.Fatalf("marshal json: %#v (%v)", js, err)
+		}
+	}
+}
+
+func BenchmarkUUID_UnmarshalJSON(b *testing.B) {
+	js := []byte(`{"uuid":"f47ac10b-58cc-0372-8567-0e02b2c3d479"}`)
+	var x *struct {
+		UUID UUID `json:"uuid"`
+	}
+	for i := 0; i < b.N; i++ {
+		err := json.Unmarshal(js, &x)
+		if err != nil {
+			b.Fatalf("marshal json: %#v (%v)", js, err)
+		}
+	}
+}

+ 39 - 0
backend/src/vendor/github.com/google/uuid/marshal.go

@@ -0,0 +1,39 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "fmt"
+
+// MarshalText implements encoding.TextMarshaler.
+func (uuid UUID) MarshalText() ([]byte, error) {
+	var js [36]byte
+	encodeHex(js[:], uuid)
+	return js[:], nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (uuid *UUID) UnmarshalText(data []byte) error {
+	id, err := ParseBytes(data)
+	if err == nil {
+		*uuid = id
+	}
+	return err
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (uuid UUID) MarshalBinary() ([]byte, error) {
+	return uuid[:], nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (uuid *UUID) UnmarshalBinary(data []byte) error {
+	if len(data) != 16 {
+		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+	}
+	copy(uuid[:], data)
+	return nil
+}

+ 103 - 0
backend/src/vendor/github.com/google/uuid/node.go

@@ -0,0 +1,103 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"net"
+	"sync"
+)
+
+var (
+	nodeMu     sync.Mutex
+	interfaces []net.Interface // cached list of interfaces
+	ifname     string          // name of interface being used
+	nodeID     [6]byte         // hardware address for version 1 UUIDs
+	zeroID     [6]byte         // nodeID with only 0's
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived.  The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated.  If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return setNodeInterface(name)
+}
+
+func setNodeInterface(name string) bool {
+	if interfaces == nil {
+		var err error
+		interfaces, err = net.Interfaces()
+		if err != nil && name != "" {
+			return false
+		}
+	}
+
+	for _, ifs := range interfaces {
+		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+			copy(nodeID[:], ifs.HardwareAddr)
+			ifname = ifs.Name
+			return true
+		}
+	}
+
+	// We found no interfaces with a valid hardware address.  If name
+	// does not specify a specific interface generate a random Node ID
+	// (section 4.1.6)
+	if name == "" {
+		randomBits(nodeID[:])
+		return true
+	}
+	return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nid := nodeID
+	return nid[:]
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs.  The first 6 bytes
+// of id are used.  If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+	if len(id) < 6 {
+		return false
+	}
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	copy(nodeID[:], id)
+	ifname = "user"
+	return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid.  It returns nil if uuid is
+// not valid.  The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+	if len(uuid) != 16 {
+		return nil
+	}
+	var node [6]byte
+	copy(node[:], uuid[10:])
+	return node[:]
+}

+ 66 - 0
backend/src/vendor/github.com/google/uuid/seq_test.go

@@ -0,0 +1,66 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"flag"
+	"runtime"
+	"testing"
+	"time"
+)
+
+// This test is only run when --regressions is passed on the go test line.
+var regressions = flag.Bool("regressions", false, "run uuid regression tests")
+
+// TestClockSeqRace tests for a particular race condition of returning two
+// identical Version1 UUIDs.  The duration of 1 minute was chosen as the race
+// condition, before being fixed, nearly always occurred in under 30 seconds.
+func TestClockSeqRace(t *testing.T) {
+	if !*regressions {
+		t.Skip("skipping regression tests")
+	}
+	duration := time.Minute
+
+	done := make(chan struct{})
+	defer close(done)
+
+	ch := make(chan UUID, 10000)
+	ncpu := runtime.NumCPU()
+	switch ncpu {
+	case 0, 1:
+		// We can't run the test effectively.
+		t.Skip("skipping race test, only one CPU detected")
+		return
+	default:
+		runtime.GOMAXPROCS(ncpu)
+	}
+	for i := 0; i < ncpu; i++ {
+		go func() {
+			for {
+				select {
+				case <-done:
+					return
+				case ch <- Must(NewUUID()):
+				}
+			}
+		}()
+	}
+
+	uuids := make(map[string]bool)
+	cnt := 0
+	start := time.Now()
+	for u := range ch {
+		s := u.String()
+		if uuids[s] {
+			t.Errorf("duplicate uuid after %d in %v: %s", cnt, time.Since(start), s)
+			return
+		}
+		uuids[s] = true
+		if time.Since(start) > duration {
+			return
+		}
+		cnt++
+	}
+}

+ 59 - 0
backend/src/vendor/github.com/google/uuid/sql.go

@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"database/sql/driver"
+	"fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case nil:
+		return nil
+
+	case string:
+		// if an empty UUID comes from a table, we return a null UUID
+		if src == "" {
+			return nil
+		}
+
+		// see Parse for required string format
+		u, err := Parse(src)
+		if err != nil {
+			return fmt.Errorf("Scan: %v", err)
+		}
+
+		*uuid = u
+
+	case []byte:
+		// if an empty UUID comes from a table, we return a null UUID
+		if len(src) == 0 {
+			return nil
+		}
+
+		// assumes a simple slice of bytes if 16 bytes
+		// otherwise attempts to parse
+		if len(src) != 16 {
+			return uuid.Scan(string(src))
+		}
+		copy((*uuid)[:], src)
+
+	default:
+		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+	}
+
+	return nil
+}
+
+// Value implements sql.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+	return uuid.String(), nil
+}
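+
+// Usage sketch from a caller's perspective (illustrative; the table and column
+// names are hypothetical and the placeholder style depends on the SQL driver):
+//
+//	var id uuid.UUID
+//	err := db.QueryRow("SELECT id FROM users WHERE name = ?", name).Scan(&id)
+//	_, err = db.Exec("INSERT INTO users (id, name) VALUES (?, ?)", uuid.New(), name)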

+ 113 - 0
backend/src/vendor/github.com/google/uuid/sql_test.go

@@ -0,0 +1,113 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestScan(t *testing.T) {
+	var stringTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d479"
+	var badTypeTest int = 6
+	var invalidTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d4"
+
+	byteTest := make([]byte, 16)
+	byteTestUUID := Must(Parse(stringTest))
+	copy(byteTest, byteTestUUID[:])
+
+	// sunny day tests
+
+	var uuid UUID
+	err := (&uuid).Scan(stringTest)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = (&uuid).Scan([]byte(stringTest))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = (&uuid).Scan(byteTest)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// bad type tests
+
+	err = (&uuid).Scan(badTypeTest)
+	if err == nil {
+		t.Error("int correctly parsed and shouldn't have")
+	}
+	if !strings.Contains(err.Error(), "unable to scan type") {
+		t.Error("attempting to parse an int returned an incorrect error message")
+	}
+
+	// invalid/incomplete uuids
+
+	err = (&uuid).Scan(invalidTest)
+	if err == nil {
+		t.Error("invalid uuid was parsed without error")
+	}
+	if !strings.Contains(err.Error(), "invalid UUID") {
+		t.Error("attempting to parse an invalid UUID returned an incorrect error message")
+	}
+
+	err = (&uuid).Scan(byteTest[:len(byteTest)-2])
+	if err == nil {
+		t.Error("invalid byte uuid was parsed without error")
+	}
+	if !strings.Contains(err.Error(), "invalid UUID") {
+		t.Error("attempting to parse an invalid byte UUID returned an incorrect error message")
+	}
+
+	// empty tests
+
+	uuid = UUID{}
+	var emptySlice []byte
+	err = (&uuid).Scan(emptySlice)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, v := range uuid {
+		if v != 0 {
+			t.Error("UUID was not nil after scanning empty byte slice")
+		}
+	}
+
+	uuid = UUID{}
+	var emptyString string
+	err = (&uuid).Scan(emptyString)
+	if err != nil {
+		t.Fatal(err)
+	}
+	for _, v := range uuid {
+		if v != 0 {
+			t.Error("UUID was not nil after scanning empty byte slice")
+		}
+	}
+
+	uuid = UUID{}
+	err = (&uuid).Scan(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	for _, v := range uuid {
+		if v != 0 {
+			t.Error("UUID was not nil after scanning nil")
+		}
+	}
+}
+
+func TestValue(t *testing.T) {
+	stringTest := "f47ac10b-58cc-0372-8567-0e02b2c3d479"
+	uuid := Must(Parse(stringTest))
+	val, _ := uuid.Value()
+	if val != stringTest {
+		t.Error("Value() did not return expected string")
+	}
+}

+ 123 - 0
backend/src/vendor/github.com/google/uuid/time.go

@@ -0,0 +1,123 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"sync"
+	"time"
+)
+
+// A Time represents a time as the number of 100s of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+	lillian    = 2299160          // Julian day of 15 Oct 1582
+	unix       = 2440587          // Julian day of 1 Jan 1970
+	epoch      = unix - lillian   // Days between epochs
+	g1582      = epoch * 86400    // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
+)
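+
+// Worked check of the constants above: epoch = 2440587 - 2299160 = 141427 days,
+// g1582 = 141427 * 86400 = 12219292800 seconds, and
+// g1582ns100 = 12219292800 * 10000000 = 122192928000000000.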
+
+var (
+	timeMu   sync.Mutex
+	lasttime uint64 // last time we returned
+	clockSeq uint16 // clock sequence for this run
+
+	timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds using the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+	sec = int64(t - g1582ns100)
+	nsec = (sec % 10000000) * 100
+	sec /= 10000000
+	return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed.  An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+	t := timeNow()
+
+	// If we don't have a clock sequence already, set one.
+	if clockSeq == 0 {
+		setClockSequence(-1)
+	}
+	now := uint64(t.UnixNano()/100) + g1582ns100
+
+	// If time has gone backwards with this clock sequence then we
+	// increment the clock sequence
+	if now <= lasttime {
+		clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
+	}
+	lasttime = now
+	return Time(now), clockSeq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set.  The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated.  Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID.  (section 4.2.1.1)
+func ClockSequence() int {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return clockSequence()
+}
+
+func clockSequence() int {
+	if clockSeq == 0 {
+		setClockSequence(-1)
+	}
+	return int(clockSeq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq.
+// Setting to -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+	if seq == -1 {
+		var b [2]byte
+		randomBits(b[:]) // clock sequence
+		seq = int(b[0])<<8 | int(b[1])
+	}
+	oldSeq := clockSeq
+	clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+	if oldSeq != clockSeq {
+		lasttime = 0
+	}
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid.  The time is only defined for version 1 and 2 UUIDs.
+func (uuid UUID) Time() Time {
+	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+	return Time(time)
+}
+
+// ClockSequence returns the clock sequence encoded in uuid.
+// The clock sequence is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) ClockSequence() int {
+	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
+}

+ 43 - 0
backend/src/vendor/github.com/google/uuid/util.go

@@ -0,0 +1,43 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+	if _, err := io.ReadFull(rander, b); err != nil {
+		panic(err.Error()) // rand should never fail
+	}
+}
+
+// xvalues maps a byte to its value as a hexadecimal digit, or to 255.
+var xvalues = [256]byte{
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts hex characters x1 and x2 into a byte.
+func xtob(x1, x2 byte) (byte, bool) {
+	b1 := xvalues[x1]
+	b2 := xvalues[x2]
+	return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}

+ 191 - 0
backend/src/vendor/github.com/google/uuid/uuid.go

@@ -0,0 +1,191 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID [16]byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+	Invalid   = Variant(iota) // Invalid UUID
+	RFC4122                   // The variant specified in RFC4122
+	Reserved                  // Reserved, NCS backward compatibility.
+	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
+	Future                    // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// Parse decodes s into a UUID or returns an error.  Both the UUID form of
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
+func Parse(s string) (UUID, error) {
+	var uuid UUID
+	if len(s) != 36 {
+		if len(s) != 36+9 {
+			return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
+		}
+		if strings.ToLower(s[:9]) != "urn:uuid:" {
+			return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
+		}
+		s = s[9:]
+	}
+	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+		return uuid, errors.New("invalid UUID format")
+	}
+	for i, x := range [16]int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		if v, ok := xtob(s[x], s[x+1]); !ok {
+			return uuid, errors.New("invalid UUID format")
+		} else {
+			uuid[i] = v
+		}
+	}
+	return uuid, nil
+}
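+
+// Both accepted forms decode to the same value, for example:
+//
+//	Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
+//	Parse("urn:uuid:f47ac10b-58cc-0372-8567-0e02b2c3d479")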
+
+// ParseBytes is like Parse, except it parses a byte slice instead of a string.
+func ParseBytes(b []byte) (UUID, error) {
+	var uuid UUID
+	if len(b) != 36 {
+		if len(b) != 36+9 {
+			return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
+		}
+		if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+			return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
+		}
+		b = b[9:]
+	}
+	if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
+		return uuid, errors.New("invalid UUID format")
+	}
+	for i, x := range [16]int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		if v, ok := xtob(b[x], b[x+1]); !ok {
+			return uuid, errors.New("invalid UUID format")
+		} else {
+			uuid[i] = v
+		}
+	}
+	return uuid, nil
+}
+
+// Must returns uuid if err is nil and panics otherwise.
+func Must(uuid UUID, err error) UUID {
+	if err != nil {
+		panic(err)
+	}
+	return uuid
+}
+
+// String returns the string form of uuid,
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) String() string {
+	var buf [36]byte
+	encodeHex(buf[:], uuid)
+	return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,  or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+	var buf [36 + 9]byte
+	copy(buf[:], "urn:uuid:")
+	encodeHex(buf[9:], uuid)
+	return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+	hex.Encode(dst[:], uuid[:4])
+	dst[8] = '-'
+	hex.Encode(dst[9:13], uuid[4:6])
+	dst[13] = '-'
+	hex.Encode(dst[14:18], uuid[6:8])
+	dst[18] = '-'
+	hex.Encode(dst[19:23], uuid[8:10])
+	dst[23] = '-'
+	hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid.
+func (uuid UUID) Variant() Variant {
+	switch {
+	case (uuid[8] & 0xc0) == 0x80:
+		return RFC4122
+	case (uuid[8] & 0xe0) == 0xc0:
+		return Microsoft
+	case (uuid[8] & 0xe0) == 0xe0:
+		return Future
+	default:
+		return Reserved
+	}
+}
+
+// Version returns the version of uuid.
+func (uuid UUID) Version() Version {
+	return Version(uuid[6] >> 4)
+}
+
+func (v Version) String() string {
+	if v > 15 {
+		return fmt.Sprintf("BAD_VERSION_%d", v)
+	}
+	return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+	switch v {
+	case RFC4122:
+		return "RFC4122"
+	case Reserved:
+		return "Reserved"
+	case Microsoft:
+		return "Microsoft"
+	case Future:
+		return "Future"
+	case Invalid:
+		return "Invalid"
+	}
+	return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}

+ 526 - 0
backend/src/vendor/github.com/google/uuid/uuid_test.go

@@ -0,0 +1,526 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+	"time"
+	"unsafe"
+)
+
+type test struct {
+	in      string
+	version Version
+	variant Variant
+	isuuid  bool
+}
+
+var tests = []test{
+	{"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true},
+	{"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true},
+	{"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true},
+	{"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true},
+	{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true},
+	{"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true},
+	{"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true},
+	{"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true},
+	{"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true},
+	{"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true},
+	{"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true},
+	{"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true},
+	{"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true},
+	{"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true},
+	{"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true},
+
+	{"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
+	{"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true},
+	{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true},
+	{"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true},
+	{"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true},
+	{"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true},
+	{"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true},
+
+	{"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false},
+	{"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false},
+	{"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false},
+	{"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false},
+	{"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false},
+	{"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false},
+}
+
+var constants = []struct {
+	c    interface{}
+	name string
+}{
+	{Person, "Person"},
+	{Group, "Group"},
+	{Org, "Org"},
+	{Invalid, "Invalid"},
+	{RFC4122, "RFC4122"},
+	{Reserved, "Reserved"},
+	{Microsoft, "Microsoft"},
+	{Future, "Future"},
+	{Domain(17), "Domain17"},
+	{Variant(42), "BadVariant42"},
+}
+
+func testTest(t *testing.T, in string, tt test) {
+	uuid, err := Parse(in)
+	if ok := (err == nil); ok != tt.isuuid {
+		t.Errorf("Parse(%s) got %v expected %v", in, ok, tt.isuuid)
+	}
+	if err != nil {
+		return
+	}
+
+	if v := uuid.Variant(); v != tt.variant {
+		t.Errorf("Variant(%s) got %d expected %d", in, v, tt.variant)
+	}
+	if v := uuid.Version(); v != tt.version {
+		t.Errorf("Version(%s) got %d expected %d", in, v, tt.version)
+	}
+}
+
+func testBytes(t *testing.T, in []byte, tt test) {
+	uuid, err := ParseBytes(in)
+	if ok := (err == nil); ok != tt.isuuid {
+		t.Errorf("ParseBytes(%s) got %v expected %v", in, ok, tt.isuuid)
+	}
+	if err != nil {
+		return
+	}
+	suuid, _ := Parse(string(in))
+	if uuid != suuid {
+		t.Errorf("ParseBytes(%s) got %v expected %v", in, uuid, suuid)
+	}
+}
+
+func TestUUID(t *testing.T) {
+	for _, tt := range tests {
+		testTest(t, tt.in, tt)
+		testTest(t, strings.ToUpper(tt.in), tt)
+		testBytes(t, []byte(tt.in), tt)
+	}
+}
+
+func TestConstants(t *testing.T) {
+	for x, tt := range constants {
+		v, ok := tt.c.(fmt.Stringer)
+		if !ok {
+			t.Errorf("%x: %v: not a stringer", x, v)
+		} else if s := v.String(); s != tt.name {
+			v, _ := tt.c.(int)
+			t.Errorf("%x: Constant %T:%d gives %q, expected %q", x, tt.c, v, s, tt.name)
+		}
+	}
+}
+
+func TestRandomUUID(t *testing.T) {
+	m := make(map[string]bool)
+	for x := 1; x < 32; x++ {
+		uuid := New()
+		s := uuid.String()
+		if m[s] {
+			t.Errorf("NewRandom returned duplicated UUID %s", s)
+		}
+		m[s] = true
+		if v := uuid.Version(); v != 4 {
+			t.Errorf("Random UUID of version %s", v)
+		}
+		if uuid.Variant() != RFC4122 {
+			t.Errorf("Random UUID is variant %d", uuid.Variant())
+		}
+	}
+}
+
+func TestNew(t *testing.T) {
+	m := make(map[UUID]bool)
+	for x := 1; x < 32; x++ {
+		s := New()
+		if m[s] {
+			t.Errorf("New returned duplicated UUID %s", s)
+		}
+		m[s] = true
+		uuid, err := Parse(s.String())
+		if err != nil {
+			t.Errorf("New.String() returned %q which does not decode", s)
+			continue
+		}
+		if v := uuid.Version(); v != 4 {
+			t.Errorf("Random UUID of version %s", v)
+		}
+		if uuid.Variant() != RFC4122 {
+			t.Errorf("Random UUID is variant %d", uuid.Variant())
+		}
+	}
+}
+
+func TestClockSeq(t *testing.T) {
+	// Fake time.Now for this test to return a monotonically advancing time; restore it at end.
+	defer func(orig func() time.Time) { timeNow = orig }(timeNow)
+	monTime := time.Now()
+	timeNow = func() time.Time {
+		monTime = monTime.Add(1 * time.Second)
+		return monTime
+	}
+
+	SetClockSequence(-1)
+	uuid1, err := NewUUID()
+	if err != nil {
+		t.Fatalf("could not create UUID: %v", err)
+	}
+	uuid2, err := NewUUID()
+	if err != nil {
+		t.Fatalf("could not create UUID: %v", err)
+	}
+
+	if s1, s2 := uuid1.ClockSequence(), uuid2.ClockSequence(); s1 != s2 {
+		t.Errorf("clock sequence %d != %d", s1, s2)
+	}
+
+	SetClockSequence(-1)
+	uuid2, err = NewUUID()
+	if err != nil {
+		t.Fatalf("could not create UUID: %v", err)
+	}
+
+	// Just on the very off chance we generated the same sequence
+	// two times we try again.
+	if uuid1.ClockSequence() == uuid2.ClockSequence() {
+		SetClockSequence(-1)
+		uuid2, err = NewUUID()
+		if err != nil {
+			t.Fatalf("could not create UUID: %v", err)
+		}
+	}
+	if s1, s2 := uuid1.ClockSequence(), uuid2.ClockSequence(); s1 == s2 {
+		t.Errorf("Duplicate clock sequence %d", s1)
+	}
+
+	SetClockSequence(0x1234)
+	uuid1, err = NewUUID()
+	if err != nil {
+		t.Fatalf("could not create UUID: %v", err)
+	}
+	if seq := uuid1.ClockSequence(); seq != 0x1234 {
+		t.Errorf("%s: expected seq 0x1234 got 0x%04x", uuid1, seq)
+	}
+}
+
+func TestCoding(t *testing.T) {
+	text := "7d444840-9dc0-11d1-b245-5ffdce74fad2"
+	urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2"
+	data := UUID{
+		0x7d, 0x44, 0x48, 0x40,
+		0x9d, 0xc0,
+		0x11, 0xd1,
+		0xb2, 0x45,
+		0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2,
+	}
+	if v := data.String(); v != text {
+		t.Errorf("%x: encoded to %s, expected %s", data, v, text)
+	}
+	if v := data.URN(); v != urn {
+		t.Errorf("%x: urn is %s, expected %s", data, v, urn)
+	}
+
+	uuid, err := Parse(text)
+	if err != nil {
+		t.Errorf("Parse returned unexpected error %v", err)
+	}
+	if uuid != data {
+		t.Errorf("%s: decoded to %s, expected %s", text, uuid, data)
+	}
+}
+
+func TestVersion1(t *testing.T) {
+	uuid1, err := NewUUID()
+	if err != nil {
+		t.Fatalf("could not create UUID: %v", err)
+	}
+	uuid2, err := NewUUID()
+	if err != nil {
+		t.Fatalf("could not create UUID: %v", err)
+	}
+
+	if uuid1 == uuid2 {
+		t.Errorf("%s:duplicate uuid", uuid1)
+	}
+	if v := uuid1.Version(); v != 1 {
+		t.Errorf("%s: version %s expected 1", uuid1, v)
+	}
+	if v := uuid2.Version(); v != 1 {
+		t.Errorf("%s: version %s expected 1", uuid2, v)
+	}
+	n1 := uuid1.NodeID()
+	n2 := uuid2.NodeID()
+	if !bytes.Equal(n1, n2) {
+		t.Errorf("Different nodes %x != %x", n1, n2)
+	}
+	t1 := uuid1.Time()
+	t2 := uuid2.Time()
+	q1 := uuid1.ClockSequence()
+	q2 := uuid2.ClockSequence()
+
+	switch {
+	case t1 == t2 && q1 == q2:
+		t.Error("time stopped")
+	case t1 > t2 && q1 == q2:
+		t.Error("time reversed")
+	case t1 < t2 && q1 != q2:
+		t.Error("clock sequence changed unexpectedly")
+	}
+}
+
+func TestNode(t *testing.T) {
+	// This test is mostly to make sure we don't leave nodeMu locked.
+	ifname = ""
+	if ni := NodeInterface(); ni != "" {
+		t.Errorf("NodeInterface got %q, want %q", ni, "")
+	}
+	if SetNodeInterface("xyzzy") {
+		t.Error("SetNodeInterface succeeded on a bad interface name")
+	}
+	if !SetNodeInterface("") {
+		t.Error("SetNodeInterface failed")
+	}
+	if ni := NodeInterface(); ni == "" {
+		t.Error("NodeInterface returned an empty string")
+	}
+
+	ni := NodeID()
+	if len(ni) != 6 {
+		t.Errorf("ni got %d bytes, want 6", len(ni))
+	}
+	hasData := false
+	for _, b := range ni {
+		if b != 0 {
+			hasData = true
+		}
+	}
+	if !hasData {
+		t.Error("nodeid is all zeros")
+	}
+
+	id := []byte{1, 2, 3, 4, 5, 6, 7, 8}
+	SetNodeID(id)
+	ni = NodeID()
+	if !bytes.Equal(ni, id[:6]) {
+		t.Errorf("got nodeid %v, want %v", ni, id[:6])
+	}
+
+	if ni := NodeInterface(); ni != "user" {
+		t.Errorf("got interface %q, want %q", ni, "user")
+	}
+}
+
+func TestNodeAndTime(t *testing.T) {
+	// Time is February 5, 1998 12:30:23.136364800 AM GMT
+
+	uuid, err := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
+	if err != nil {
+		t.Fatalf("Parser returned unexpected error %v", err)
+	}
+	node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2}
+
+	ts := uuid.Time()
+	c := time.Unix(ts.UnixTime())
+	want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC)
+	if !c.Equal(want) {
+		t.Errorf("Got time %v, want %v", c, want)
+	}
+	if !bytes.Equal(node, uuid.NodeID()) {
+		t.Errorf("Expected node %v got %v", node, uuid.NodeID())
+	}
+}
+
+func TestMD5(t *testing.T) {
+	uuid := NewMD5(NameSpaceDNS, []byte("python.org")).String()
+	want := "6fa459ea-ee8a-3ca4-894e-db77e160355e"
+	if uuid != want {
+		t.Errorf("MD5: got %q expected %q", uuid, want)
+	}
+}
+
+func TestSHA1(t *testing.T) {
+	uuid := NewSHA1(NameSpaceDNS, []byte("python.org")).String()
+	want := "886313e1-3b8a-5372-9b90-0c9aee199e5d"
+	if uuid != want {
+		t.Errorf("SHA1: got %q expected %q", uuid, want)
+	}
+}
+
+func TestNodeID(t *testing.T) {
+	nid := []byte{1, 2, 3, 4, 5, 6}
+	SetNodeInterface("")
+	s := NodeInterface()
+	if s == "" || s == "user" {
+		t.Errorf("NodeInterface %q after SetNodeInterface", s)
+	}
+	node1 := NodeID()
+	if node1 == nil {
+		t.Error("NodeID nil after SetNodeInterface", s)
+	}
+	SetNodeID(nid)
+	s = NodeInterface()
+	if s != "user" {
+		t.Errorf("Expected NodeInterface %q got %q", "user", s)
+	}
+	node2 := NodeID()
+	if node2 == nil {
+		t.Error("NodeID nil after SetNodeID", s)
+	}
+	if bytes.Equal(node1, node2) {
+		t.Error("NodeID not changed after SetNodeID", s)
+	} else if !bytes.Equal(nid, node2) {
+		t.Errorf("NodeID is %x, expected %x", node2, nid)
+	}
+}
+
+func testDCE(t *testing.T, name string, uuid UUID, err error, domain Domain, id uint32) {
+	if err != nil {
+		t.Errorf("%s failed: %v", name, err)
+		return
+	}
+	if v := uuid.Version(); v != 2 {
+		t.Errorf("%s: %s: expected version 2, got %s", name, uuid, v)
+		return
+	}
+	if v := uuid.Domain(); v != domain {
+		t.Errorf("%s: %s: expected domain %d, got %d", name, uuid, domain, v)
+	}
+	if v := uuid.ID(); v != id {
+		t.Errorf("%s: %s: expected id %d, got %d", name, uuid, id, v)
+	}
+}
+
+func TestDCE(t *testing.T) {
+	uuid, err := NewDCESecurity(42, 12345678)
+	testDCE(t, "NewDCESecurity", uuid, err, 42, 12345678)
+	uuid, err = NewDCEPerson()
+	testDCE(t, "NewDCEPerson", uuid, err, Person, uint32(os.Getuid()))
+	uuid, err = NewDCEGroup()
+	testDCE(t, "NewDCEGroup", uuid, err, Group, uint32(os.Getgid()))
+}
+
+type badRand struct{}
+
+func (r badRand) Read(buf []byte) (int, error) {
+	for i := range buf {
+		buf[i] = byte(i)
+	}
+	return len(buf), nil
+}
+
+func TestBadRand(t *testing.T) {
+	SetRand(badRand{})
+	uuid1 := New()
+	uuid2 := New()
+	if uuid1 != uuid2 {
+		t.Errorf("expected duplicates, got %q and %q", uuid1, uuid2)
+	}
+	SetRand(nil)
+	uuid1 = New()
+	uuid2 = New()
+	if uuid1 == uuid2 {
+		t.Errorf("unexpected duplicates, got %q", uuid1)
+	}
+}
+
+var asString = "f47ac10b-58cc-0372-8567-0e02b2c3d479"
+var asBytes = []byte(asString)
+
+func BenchmarkParse(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		_, err := Parse(asString)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func BenchmarkParseBytes(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		_, err := ParseBytes(asBytes)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+// parseBytesUnsafe is to benchmark using unsafe.
+func parseBytesUnsafe(b []byte) (UUID, error) {
+	return Parse(*(*string)(unsafe.Pointer(&b)))
+}
+
+func BenchmarkParseBytesUnsafe(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		_, err := parseBytesUnsafe(asBytes)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+// parseBytesCopy is to benchmark not using unsafe.
+func parseBytesCopy(b []byte) (UUID, error) {
+	return Parse(string(b))
+}
+
+func BenchmarkParseBytesCopy(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		_, err := parseBytesCopy(asBytes)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func BenchmarkNew(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		New()
+	}
+}
+
+func BenchmarkUUID_String(b *testing.B) {
+	uuid, err := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
+	if err != nil {
+		b.Fatal(err)
+	}
+	for i := 0; i < b.N; i++ {
+		if uuid.String() == "" {
+			b.Fatal("invalid uuid")
+		}
+	}
+}
+
+func BenchmarkUUID_URN(b *testing.B) {
+	uuid, err := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
+	if err != nil {
+		b.Fatal(err)
+	}
+	for i := 0; i < b.N; i++ {
+		if uuid.URN() == "" {
+			b.Fatal("invalid uuid")
+		}
+	}
+}

+ 44 - 0
backend/src/vendor/github.com/google/uuid/version1.go

@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time.  If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically.  If the NodeID cannot
+// be set NewUUID returns nil.  If clock sequence has not been set by
+// SetClockSequence then it will be set automatically.  If GetTime fails to
+// return the current NewUUID returns Nil and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nodeMu.Unlock()
+
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	timeLow := uint32(now & 0xffffffff)
+	timeMid := uint16((now >> 32) & 0xffff)
+	timeHi := uint16((now >> 48) & 0x0fff)
+	timeHi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], timeLow)
+	binary.BigEndian.PutUint16(uuid[4:], timeMid)
+	binary.BigEndian.PutUint16(uuid[6:], timeHi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+	copy(uuid[10:], nodeID[:])
+
+	return uuid, nil
+}

+ 38 - 0
backend/src/vendor/github.com/google/uuid/version4.go

@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics.  New is equivalent to
+// the expression
+//
+//    uuid.Must(uuid.NewRandom())
+func New() UUID {
+	return Must(NewRandom())
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//  Randomly generated UUIDs have 122 random bits.  One's annual risk of being
+//  hit by a meteorite is estimated to be one chance in 17 billion, that
+//  means the probability is about 0.00000000006 (6 × 10^-11),
+//  equivalent to the odds of creating a few tens of trillions of UUIDs in a
+//  year and having one duplicate.
+func NewRandom() (UUID, error) {
+	var uuid UUID
+	_, err := io.ReadFull(rander, uuid[:])
+	if err != nil {
+		return Nil, err
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}

+ 45 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/.travis.yml

@@ -0,0 +1,45 @@
+language: go
+
+go_import_path: gopkg.in/mgo.v2-unstable
+
+addons:
+    apt:
+        packages:
+
+env:
+    global:
+        - BUCKET=https://niemeyer.s3.amazonaws.com
+    matrix:
+        - GO=1.4.1 MONGODB=x86_64-2.2.7
+        - GO=1.4.1 MONGODB=x86_64-2.4.14
+        - GO=1.4.1 MONGODB=x86_64-2.6.11
+        - GO=1.4.1 MONGODB=x86_64-3.0.9
+        - GO=1.4.1 MONGODB=x86_64-3.2.3-nojournal
+        - GO=1.5.3 MONGODB=x86_64-3.0.9
+        - GO=1.6   MONGODB=x86_64-3.0.9
+
+install:
+    - eval "$(gimme $GO)"
+
+    - wget $BUCKET/mongodb-linux-$MONGODB.tgz
+    - tar xzvf mongodb-linux-$MONGODB.tgz
+    - export PATH=$PWD/mongodb-linux-$MONGODB/bin:$PATH
+
+    - wget $BUCKET/daemontools.tar.gz
+    - tar xzvf daemontools.tar.gz
+    - export PATH=$PWD/daemontools:$PATH
+
+    - go get gopkg.in/check.v1
+    - go get gopkg.in/yaml.v2
+    - go get gopkg.in/tomb.v2
+
+before_script:
+    - export NOIPV6=1
+    - make startdb
+
+script:
+    - (cd bson && go test -check.v)
+    - go test -check.v -fast
+    - (cd txn && go test -check.v)
+
+# vim:sw=4:ts=4:et

+ 25 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/LICENSE

@@ -0,0 +1,25 @@
+mgo - MongoDB driver for Go
+
+Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 5 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/Makefile

@@ -0,0 +1,5 @@
+startdb:
+	@harness/setup.sh start
+
+stopdb:
+	@harness/setup.sh stop

+ 4 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/README.md

@@ -0,0 +1,4 @@
+The MongoDB driver for Go
+-------------------------
+
+Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details.

+ 467 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/auth.go

@@ -0,0 +1,467 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"sync"
+
+	"gopkg.in/mgo.v2-unstable/bson"
+	"gopkg.in/mgo.v2-unstable/internal/scram"
+)
+
+type authCmd struct {
+	Authenticate int
+
+	Nonce string
+	User  string
+	Key   string
+}
+
+type startSaslCmd struct {
+	StartSASL int `bson:"startSasl"`
+}
+
+type authResult struct {
+	ErrMsg string
+	Ok     bool
+}
+
+type getNonceCmd struct {
+	GetNonce int
+}
+
+type getNonceResult struct {
+	Nonce string
+	Err   string "$err"
+	Code  int
+}
+
+type logoutCmd struct {
+	Logout int
+}
+
+type saslCmd struct {
+	Start          int    `bson:"saslStart,omitempty"`
+	Continue       int    `bson:"saslContinue,omitempty"`
+	ConversationId int    `bson:"conversationId,omitempty"`
+	Mechanism      string `bson:"mechanism,omitempty"`
+	Payload        []byte
+}
+
+type saslResult struct {
+	Ok    bool `bson:"ok"`
+	NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?)
+	Done  bool
+
+	ConversationId int `bson:"conversationId"`
+	Payload        []byte
+	ErrMsg         string
+}
+
+type saslStepper interface {
+	Step(serverData []byte) (clientData []byte, done bool, err error)
+	Close()
+}
+
+func (socket *mongoSocket) getNonce() (nonce string, err error) {
+	socket.Lock()
+	for socket.cachedNonce == "" && socket.dead == nil {
+		debugf("Socket %p to %s: waiting for nonce", socket, socket.addr)
+		socket.gotNonce.Wait()
+	}
+	if socket.cachedNonce == "mongos" {
+		socket.Unlock()
+		return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth")
+	}
+	debugf("Socket %p to %s: got nonce", socket, socket.addr)
+	nonce, err = socket.cachedNonce, socket.dead
+	socket.cachedNonce = ""
+	socket.Unlock()
+	if err != nil {
+		nonce = ""
+	}
+	return
+}
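
The loop above is the classic condition-variable pattern: hold the lock,
re-check the predicate after every wake-up, and have the producer signal once
it has published a value (resetNonce below plays that role). The same pattern
in isolation, a sketch with hypothetical names:

    type nonceCache struct {
        mu     sync.Mutex
        cond   *sync.Cond
        cached string
    }

    func newNonceCache() *nonceCache {
        c := &nonceCache{}
        c.cond = sync.NewCond(&c.mu)
        return c
    }

    // get blocks until a nonce has been published, then consumes it.
    func (c *nonceCache) get() string {
        c.mu.Lock()
        defer c.mu.Unlock()
        for c.cached == "" {
            c.cond.Wait() // releases mu while sleeping, reacquires on wake-up
        }
        n := c.cached
        c.cached = ""
        return n
    }

    // put publishes a nonce and wakes one waiting reader.
    func (c *nonceCache) put(n string) {
        c.mu.Lock()
        c.cached = n
        c.cond.Signal()
        c.mu.Unlock()
    }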
+
+func (socket *mongoSocket) resetNonce() {
+	debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr)
+	op := &queryOp{}
+	op.query = &getNonceCmd{GetNonce: 1}
+	op.collection = "admin.$cmd"
+	op.limit = -1
+	op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
+		if err != nil {
+			socket.kill(errors.New("getNonce: "+err.Error()), true)
+			return
+		}
+		result := &getNonceResult{}
+		err = bson.Unmarshal(docData, &result)
+		if err != nil {
+			socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true)
+			return
+		}
+		debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result)
+		if result.Code == 13390 {
+			// mongos doesn't yet support auth (see http://j.mp/mongos-auth)
+			result.Nonce = "mongos"
+		} else if result.Nonce == "" {
+			var msg string
+			if result.Err != "" {
+				msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code)
+			} else {
+				msg = "Got an empty nonce"
+			}
+			socket.kill(errors.New(msg), true)
+			return
+		}
+		socket.Lock()
+		if socket.cachedNonce != "" {
+			socket.Unlock()
+			panic("resetNonce: nonce already cached")
+		}
+		socket.cachedNonce = result.Nonce
+		socket.gotNonce.Signal()
+		socket.Unlock()
+	}
+	err := socket.Query(op)
+	if err != nil {
+		socket.kill(errors.New("resetNonce: "+err.Error()), true)
+	}
+}
+
+func (socket *mongoSocket) Login(cred Credential) error {
+	socket.Lock()
+	if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 {
+		cred.Mechanism = "SCRAM-SHA-1"
+	}
+	for _, sockCred := range socket.creds {
+		if sockCred == cred {
+			debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username)
+			socket.Unlock()
+			return nil
+		}
+	}
+	if socket.dropLogout(cred) {
+		debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username)
+		socket.creds = append(socket.creds, cred)
+		socket.Unlock()
+		return nil
+	}
+	socket.Unlock()
+
+	debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username)
+
+	var err error
+	switch cred.Mechanism {
+	case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501.
+		err = socket.loginClassic(cred)
+	case "PLAIN":
+		err = socket.loginPlain(cred)
+	case "MONGODB-X509":
+		err = socket.loginX509(cred)
+	default:
+		// Try SASL for everything else, if it is available.
+		err = socket.loginSASL(cred)
+	}
+
+	if err != nil {
+		debugf("Socket %p to %s: login error: %s", socket, socket.addr, err)
+	} else {
+		debugf("Socket %p to %s: login successful", socket, socket.addr)
+	}
+	return err
+}
+
+func (socket *mongoSocket) loginClassic(cred Credential) error {
+	// Note that this only works properly because this function is
+	// synchronous, which means the nonce won't get reset while we're
+	// using it and any other login requests will block waiting for a
+	// new nonce provided in the defer call below.
+	nonce, err := socket.getNonce()
+	if err != nil {
+		return err
+	}
+	defer socket.resetNonce()
+
+	psum := md5.New()
+	psum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
+
+	ksum := md5.New()
+	ksum.Write([]byte(nonce + cred.Username))
+	ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil))))
+
+	key := hex.EncodeToString(ksum.Sum(nil))
+
+	cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key}
+	res := authResult{}
+	return socket.loginRun(cred.Source, &cmd, &res, func() error {
+		if !res.Ok {
+			return errors.New(res.ErrMsg)
+		}
+		socket.Lock()
+		socket.dropAuth(cred.Source)
+		socket.creds = append(socket.creds, cred)
+		socket.Unlock()
+		return nil
+	})
+}
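
For reference, the proof computed above is
md5(nonce + username + hex(md5(username + ":mongo:" + password))), with both
digests hex-encoded. A standalone sketch of the derivation (same crypto/md5
and encoding/hex imports as this file); note that saslNewScram further below
reuses the inner digest as the SCRAM-SHA-1 password:

    // mongoCRKey reproduces the legacy MONGODB-CR proof of possession.
    func mongoCRKey(nonce, user, password string) string {
        psum := md5.Sum([]byte(user + ":mongo:" + password))
        pwDigest := hex.EncodeToString(psum[:])
        ksum := md5.Sum([]byte(nonce + user + pwDigest))
        return hex.EncodeToString(ksum[:])
    }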
+
+type authX509Cmd struct {
+	Authenticate int
+	User         string
+	Mechanism    string
+}
+
+func (socket *mongoSocket) loginX509(cred Credential) error {
+	cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"}
+	res := authResult{}
+	return socket.loginRun(cred.Source, &cmd, &res, func() error {
+		if !res.Ok {
+			return errors.New(res.ErrMsg)
+		}
+		socket.Lock()
+		socket.dropAuth(cred.Source)
+		socket.creds = append(socket.creds, cred)
+		socket.Unlock()
+		return nil
+	})
+}
+
+func (socket *mongoSocket) loginPlain(cred Credential) error {
+	cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)}
+	res := authResult{}
+	return socket.loginRun(cred.Source, &cmd, &res, func() error {
+		if !res.Ok {
+			return errors.New(res.ErrMsg)
+		}
+		socket.Lock()
+		socket.dropAuth(cred.Source)
+		socket.creds = append(socket.creds, cred)
+		socket.Unlock()
+		return nil
+	})
+}
+
+func (socket *mongoSocket) loginSASL(cred Credential) error {
+	var sasl saslStepper
+	var err error
+	if cred.Mechanism == "SCRAM-SHA-1" {
+		// SCRAM is handled without external libraries.
+		sasl = saslNewScram(cred)
+	} else if len(cred.ServiceHost) > 0 {
+		sasl, err = saslNew(cred, cred.ServiceHost)
+	} else {
+		sasl, err = saslNew(cred, socket.Server().Addr)
+	}
+	if err != nil {
+		return err
+	}
+	defer sasl.Close()
+
+	// The goal of this logic is to carry a locked socket until the
+	// local SASL step confirms the auth is valid; the socket needs to be
+	// locked so that concurrent action doesn't leave the socket in an
+	// auth state that doesn't reflect the operations that took place.
+	// As a simple case, imagine inverting login=>logout to logout=>login.
+	//
+	// The logic below works because the lock func isn't called concurrently.
+	locked := false
+	lock := func(b bool) {
+		if locked != b {
+			locked = b
+			if b {
+				socket.Lock()
+			} else {
+				socket.Unlock()
+			}
+		}
+	}
+
+	lock(true)
+	defer lock(false)
+
+	start := 1
+	cmd := saslCmd{}
+	res := saslResult{}
+	for {
+		payload, done, err := sasl.Step(res.Payload)
+		if err != nil {
+			return err
+		}
+		if done && res.Done {
+			socket.dropAuth(cred.Source)
+			socket.creds = append(socket.creds, cred)
+			break
+		}
+		lock(false)
+
+		cmd = saslCmd{
+			Start:          start,
+			Continue:       1 - start,
+			ConversationId: res.ConversationId,
+			Mechanism:      cred.Mechanism,
+			Payload:        payload,
+		}
+		start = 0
+		err = socket.loginRun(cred.Source, &cmd, &res, func() error {
+			// See the comment on lock for why this is necessary.
+			lock(true)
+			if !res.Ok || res.NotOk {
+				return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg)
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+		if done && res.Done {
+			socket.dropAuth(cred.Source)
+			socket.creds = append(socket.creds, cred)
+			break
+		}
+	}
+
+	return nil
+}
+
+func saslNewScram(cred Credential) *saslScram {
+	credsum := md5.New()
+	credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
+	client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil)))
+	return &saslScram{cred: cred, client: client}
+}
+
+type saslScram struct {
+	cred   Credential
+	client *scram.Client
+}
+
+func (s *saslScram) Close() {}
+
+func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) {
+	more := s.client.Step(serverData)
+	return s.client.Out(), !more, s.client.Err()
+}
+
+func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error {
+	var mutex sync.Mutex
+	var replyErr error
+	mutex.Lock()
+
+	op := queryOp{}
+	op.query = query
+	op.collection = db + ".$cmd"
+	op.limit = -1
+	op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
+		defer mutex.Unlock()
+
+		if err != nil {
+			replyErr = err
+			return
+		}
+
+		err = bson.Unmarshal(docData, result)
+		if err != nil {
+			replyErr = err
+		} else {
+			// Must handle this within the read loop for the socket, so
+			// that concurrent login requests are properly ordered.
+			replyErr = f()
+		}
+	}
+
+	err := socket.Query(&op)
+	if err != nil {
+		return err
+	}
+	mutex.Lock() // Wait.
+	return replyErr
+}
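
loginRun uses a plain sync.Mutex as a one-shot semaphore: it is locked before
the query is issued, the reply callback unlocks it, and the second Lock call
therefore blocks the caller until the reply has been processed. The trick in
isolation (a sketch; a buffered channel would be the more idiomatic hand-off
in modern Go):

    // runAndWait starts an asynchronous operation and blocks until its
    // callback has fired, using a double Lock on a mutex as the wait.
    func runAndWait(start func(done func(err error))) error {
        var mu sync.Mutex
        var opErr error
        mu.Lock() // take the token
        start(func(err error) {
            opErr = err
            mu.Unlock() // hand the token back from the callback
        })
        mu.Lock() // blocks until the callback has unlocked
        return opErr
    }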
+
+func (socket *mongoSocket) Logout(db string) {
+	socket.Lock()
+	cred, found := socket.dropAuth(db)
+	if found {
+		debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db)
+		socket.logout = append(socket.logout, cred)
+	}
+	socket.Unlock()
+}
+
+func (socket *mongoSocket) LogoutAll() {
+	socket.Lock()
+	if l := len(socket.creds); l > 0 {
+		debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l)
+		socket.logout = append(socket.logout, socket.creds...)
+		socket.creds = socket.creds[0:0]
+	}
+	socket.Unlock()
+}
+
+func (socket *mongoSocket) flushLogout() (ops []interface{}) {
+	socket.Lock()
+	if l := len(socket.logout); l > 0 {
+		debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l)
+		for i := 0; i != l; i++ {
+			op := queryOp{}
+			op.query = &logoutCmd{1}
+			op.collection = socket.logout[i].Source + ".$cmd"
+			op.limit = -1
+			ops = append(ops, &op)
+		}
+		socket.logout = socket.logout[0:0]
+	}
+	socket.Unlock()
+	return
+}
+
+func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) {
+	for i, sockCred := range socket.creds {
+		if sockCred.Source == db {
+			copy(socket.creds[i:], socket.creds[i+1:])
+			socket.creds = socket.creds[:len(socket.creds)-1]
+			return sockCred, true
+		}
+	}
+	return cred, false
+}
+
+func (socket *mongoSocket) dropLogout(cred Credential) (found bool) {
+	for i, sockCred := range socket.logout {
+		if sockCred == cred {
+			copy(socket.logout[i:], socket.logout[i+1:])
+			socket.logout = socket.logout[:len(socket.logout)-1]
+			return true
+		}
+	}
+	return false
+}
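
dropAuth and dropLogout both delete an element with the standard
copy-and-truncate idiom, which removes s[i] in place while preserving order.
In isolation (a sketch over strings):

    // removeAt deletes s[i] in place, preserving the order of the rest.
    func removeAt(s []string, i int) []string {
        copy(s[i:], s[i+1:]) // shift the tail left over the hole
        return s[:len(s)-1]  // drop the now-duplicated last element
    }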

The diff is not shown because the file size is too large.
+ 1180 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/auth_test.go


+ 25 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/LICENSE

@@ -0,0 +1,25 @@
+BSON library for Go
+
+Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 738 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/bson.go

@@ -0,0 +1,738 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package bson is an implementation of the BSON specification for Go:
+//
+//     http://bsonspec.org
+//
+// It was created as part of the mgo MongoDB driver for Go, but is standalone
+// and may be used on its own without the driver.
+package bson
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/rand"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// --------------------------------------------------------------------------
+// The public API.
+
+// A value implementing the bson.Getter interface will have its GetBSON
+// method called when the given value has to be marshalled, and the result
+// of this method will be marshaled in place of the actual object.
+//
+// If GetBSON returns a non-nil error, the marshalling procedure
+// will stop and error out with the provided value.
+type Getter interface {
+	GetBSON() (interface{}, error)
+}
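
The Setter documentation below carries an example; the Getter side is
symmetric. A hypothetical implementation that substitutes a small document
for the value being marshalled:

    type Celsius float64

    // GetBSON is called by Marshal; its result is marshalled in place
    // of the Celsius value itself.
    func (c Celsius) GetBSON() (interface{}, error) {
        return bson.M{"unit": "C", "value": float64(c)}, nil
    }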
+
+// A value implementing the bson.Setter interface will receive the BSON
+// value via the SetBSON method during unmarshaling, and the object
+// itself will not be changed as usual.
+//
+// If setting the value works, the method should return nil or alternatively
+// bson.SetZero to set the respective field to its zero value (nil for
+// pointer types). If SetBSON returns a value of type bson.TypeError, the
+// BSON value will be omitted from a map or slice being decoded and the
+// unmarshalling will continue. If it returns any other non-nil error, the
+// unmarshalling procedure will stop and error out with the provided value.
+//
+// This interface is generally useful in pointer receivers, since the method
+// will want to change the receiver. A type field that implements the Setter
+// interface doesn't have to be a pointer, though.
+//
+// Unlike the usual behavior, unmarshalling onto a value that implements a
+// Setter interface will NOT reset the value to its zero state. This allows
+// the value to decide by itself how to be unmarshalled.
+//
+// For example:
+//
+//     type MyString string
+//
+//     func (s *MyString) SetBSON(raw bson.Raw) error {
+//         return raw.Unmarshal(s)
+//     }
+//
+type Setter interface {
+	SetBSON(raw Raw) error
+}
+
+// SetZero may be returned from a SetBSON method to have the value set to
+// its respective zero value. When used in pointer values, this will set the
+// field to nil rather than to the pre-allocated value.
+var SetZero = errors.New("set to zero")
+
+// M is a convenient alias for the map[string]interface{} type, useful for
+// dealing with BSON in a native way.  For instance:
+//
+//     bson.M{"a": 1, "b": true}
+//
+// There's no special handling for this type in addition to what's done anyway
+// for an equivalent map type.  Elements in the map will be dumped in an
+// undefined order. See also the bson.D type for an ordered alternative.
+type M map[string]interface{}
+
+// D represents a BSON document containing ordered elements. For example:
+//
+//     bson.D{{"a", 1}, {"b", true}}
+//
+// In some situations, such as when creating indexes for MongoDB, the order in
+// which the elements are defined is important.  If the order is not important,
+// using a map is generally more comfortable. See bson.M and bson.RawD.
+type D []DocElem
+
+// DocElem is an element of the bson.D document representation.
+type DocElem struct {
+	Name  string
+	Value interface{}
+}
+
+// Map returns a map out of the ordered element name/value pairs in d.
+func (d D) Map() (m M) {
+	m = make(M, len(d))
+	for _, item := range d {
+		m[item.Name] = item.Value
+	}
+	return m
+}
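
A small illustration of the two document forms (user code importing this
package as bson):

    func exampleDocs() {
        // Ordered: element order is preserved on the wire, which matters
        // for commands and index specifications.
        idx := bson.D{{Name: "lastname", Value: 1}, {Name: "firstname", Value: 1}}

        // Unordered: convenient for plain documents.
        doc := bson.M{"lastname": "Niemeyer", "firstname": "Gustavo"}

        // Map flattens the ordered form once order no longer matters.
        fmt.Println(idx.Map(), doc)
    }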
+
+// The Raw type represents raw unprocessed BSON documents and elements.
+// Kind is the kind of element as defined per the BSON specification, and
+// Data is the raw unprocessed data for the respective element.
+// Using this type it is possible to unmarshal or marshal values partially.
+//
+// Relevant documentation:
+//
+//     http://bsonspec.org/#/specification
+//
+type Raw struct {
+	Kind byte
+	Data []byte
+}
+
+// RawD represents a BSON document containing raw unprocessed elements.
+// This low-level representation may be useful when lazily processing
+// documents of uncertain content, or when manipulating the raw content
+// documents in general.
+type RawD []RawDocElem
+
+// See the RawD type.
+type RawDocElem struct {
+	Name  string
+	Value Raw
+}
+
+// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
+// long. MongoDB objects by default have such a property set in their "_id"
+// property.
+//
+// http://www.mongodb.org/display/DOCS/Object+IDs
+type ObjectId string
+
+// ObjectIdHex returns an ObjectId from the provided hex representation.
+// Calling this function with an invalid hex representation will
+// cause a runtime panic. See the IsObjectIdHex function.
+func ObjectIdHex(s string) ObjectId {
+	d, err := hex.DecodeString(s)
+	if err != nil || len(d) != 12 {
+		panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s))
+	}
+	return ObjectId(d)
+}
+
+// IsObjectIdHex returns whether s is a valid hex representation of
+// an ObjectId. See the ObjectIdHex function.
+func IsObjectIdHex(s string) bool {
+	if len(s) != 24 {
+		return false
+	}
+	_, err := hex.DecodeString(s)
+	return err == nil
+}
+
+// objectIdCounter is atomically incremented when generating a new ObjectId
+// using the NewObjectId() function. It's used as the counter part of an id.
+var objectIdCounter uint32 = readRandomUint32()
+
+// readRandomUint32 returns a random objectIdCounter.
+func readRandomUint32() uint32 {
+	var b [4]byte
+	_, err := io.ReadFull(rand.Reader, b[:])
+	if err != nil {
+		panic(fmt.Errorf("cannot read random object id: %v", err))
+	}
+	return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24))
+}
+
+// machineId stores machine id generated once and used in subsequent calls
+// to NewObjectId function.
+var machineId = readMachineId()
+var processId = os.Getpid()
+
+// readMachineId generates and returns a machine id.
+// If the hostname cannot be determined, it falls back to random bytes and
+// panics only if reading those fails as well.
+func readMachineId() []byte {
+	var sum [3]byte
+	id := sum[:]
+	hostname, err1 := os.Hostname()
+	if err1 != nil {
+		_, err2 := io.ReadFull(rand.Reader, id)
+		if err2 != nil {
+			panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
+		}
+		return id
+	}
+	hw := md5.New()
+	hw.Write([]byte(hostname))
+	copy(id, hw.Sum(nil))
+	return id
+}
+
+// NewObjectId returns a new unique ObjectId.
+func NewObjectId() ObjectId {
+	var b [12]byte
+	// Timestamp, 4 bytes, big endian
+	binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
+	// Machine, first 3 bytes of md5(hostname)
+	b[4] = machineId[0]
+	b[5] = machineId[1]
+	b[6] = machineId[2]
+	// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
+	b[7] = byte(processId >> 8)
+	b[8] = byte(processId)
+	// Increment, 3 bytes, big endian
+	i := atomic.AddUint32(&objectIdCounter, 1)
+	b[9] = byte(i >> 16)
+	b[10] = byte(i >> 8)
+	b[11] = byte(i)
+	return ObjectId(b[:])
+}
+
+// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
+// with the provided number of seconds from epoch UTC, and all other parts
+// filled with zeroes. It's not safe to insert a document with an id generated
+// by this method; it is useful only for queries to find documents with ids
+// generated before or after the specified timestamp.
+func NewObjectIdWithTime(t time.Time) ObjectId {
+	var b [12]byte
+	binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
+	return ObjectId(string(b[:]))
+}
+
+// String returns a hex string representation of the id.
+// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
+func (id ObjectId) String() string {
+	return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
+}
+
+// Hex returns a hex representation of the ObjectId.
+func (id ObjectId) Hex() string {
+	return hex.EncodeToString([]byte(id))
+}
+
+// MarshalJSON turns a bson.ObjectId into a json.Marshaller.
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
+}
+
+var nullBytes = []byte("null")
+
+// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller.
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+	if len(data) > 0 && (data[0] == '{' || data[0] == 'O') {
+		var v struct {
+			Id   json.RawMessage `json:"$oid"`
+			Func struct {
+				Id json.RawMessage
+			} `json:"$oidFunc"`
+		}
+		err := jdec(data, &v)
+		if err == nil {
+			if len(v.Id) > 0 {
+				data = []byte(v.Id)
+			} else {
+				data = []byte(v.Func.Id)
+			}
+		}
+	}
+	if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
+		*id = ""
+		return nil
+	}
+	if len(data) != 26 || data[0] != '"' || data[25] != '"' {
+		return fmt.Errorf("invalid ObjectId in JSON: %s", string(data))
+	}
+	var buf [12]byte
+	_, err := hex.Decode(buf[:], data[1:25])
+	if err != nil {
+		return fmt.Errorf("invalid ObjectId in JSON: %s (%s)", string(data), err)
+	}
+	*id = ObjectId(string(buf[:]))
+	return nil
+}
+
+// MarshalText turns bson.ObjectId into an encoding.TextMarshaler.
+func (id ObjectId) MarshalText() ([]byte, error) {
+	return []byte(fmt.Sprintf("%x", string(id))), nil
+}
+
+// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler.
+func (id *ObjectId) UnmarshalText(data []byte) error {
+	if len(data) == 1 && data[0] == ' ' || len(data) == 0 {
+		*id = ""
+		return nil
+	}
+	if len(data) != 24 {
+		return fmt.Errorf("invalid ObjectId: %s", data)
+	}
+	var buf [12]byte
+	_, err := hex.Decode(buf[:], data[:])
+	if err != nil {
+		return fmt.Errorf("invalid ObjectId: %s (%s)", data, err)
+	}
+	*id = ObjectId(string(buf[:]))
+	return nil
+}
+
+// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
+func (id ObjectId) Valid() bool {
+	return len(id) == 12
+}
+
+// byteSlice returns byte slice of id from start to end.
+// Calling this function with an invalid id will cause a runtime panic.
+func (id ObjectId) byteSlice(start, end int) []byte {
+	if len(id) != 12 {
+		panic(fmt.Sprintf("invalid ObjectId: %q", string(id)))
+	}
+	return []byte(string(id)[start:end])
+}
+
+// Time returns the timestamp part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Time() time.Time {
+	// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
+	secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
+	return time.Unix(secs, 0)
+}
+
+// Machine returns the 3-byte machine id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Machine() []byte {
+	return id.byteSlice(4, 7)
+}
+
+// Pid returns the process id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Pid() uint16 {
+	return binary.BigEndian.Uint16(id.byteSlice(7, 9))
+}
+
+// Counter returns the incrementing value part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Counter() int32 {
+	b := id.byteSlice(9, 12)
+	// Counter is stored as big-endian 3-byte value
+	return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
+}
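
Putting the accessors above together: a sketch that generates an id and takes
it apart into the four fields laid out by NewObjectId:

    func inspectObjectId() {
        id := bson.NewObjectId()
        fmt.Println(id.Hex())     // 24 hex characters
        fmt.Println(id.Time())    // 4-byte big-endian Unix timestamp
        fmt.Println(id.Machine()) // 3 bytes of md5(hostname)
        fmt.Println(id.Pid())     // 2-byte process id
        fmt.Println(id.Counter()) // 3-byte big-endian counter

        // ObjectIdHex panics on bad input, so gate it with IsObjectIdHex
        // when the string is untrusted.
        s := id.Hex()
        if bson.IsObjectIdHex(s) {
            fmt.Println(bson.ObjectIdHex(s) == id) // true
        }
    }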
+
+// The Symbol type is similar to a string and is used in languages with a
+// distinct symbol type.
+type Symbol string
+
+// Now returns the current time with millisecond precision. MongoDB stores
+// timestamps with the same precision, so a Time returned from this method
+// will not change after a roundtrip to the database. That's the only reason
+// why this function exists. Using the time.Now function also works fine
+// otherwise.
+func Now() time.Time {
+	return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
+}
+
+// MongoTimestamp is a special internal type used by MongoDB that for some
+// strange reason has its own datatype defined in BSON.
+type MongoTimestamp int64
+
+type orderKey int64
+
+// MaxKey is a special value that compares higher than all other possible BSON
+// values in a MongoDB database.
+var MaxKey = orderKey(1<<63 - 1)
+
+// MinKey is a special value that compares lower than all other possible BSON
+// values in a MongoDB database.
+var MinKey = orderKey(-1 << 63)
+
+type undefined struct{}
+
+// Undefined represents the undefined BSON value.
+var Undefined undefined
+
+// Binary is a representation for non-standard binary values.  Any kind should
+// work, but the following are known as of this writing:
+//
+//   0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
+//   0x01 - Function (!?)
+//   0x02 - Obsolete generic.
+//   0x03 - UUID
+//   0x05 - MD5
+//   0x80 - User defined.
+//
+type Binary struct {
+	Kind byte
+	Data []byte
+}
+
+// RegEx represents a regular expression.  The Options field may contain
+// individual characters defining the way in which the pattern should be
+// applied, and must be sorted. Valid options as of this writing are 'i' for
+// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
+// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
+// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
+// unicode. The value of the Options parameter is not verified before being
+// marshaled into the BSON format.
+type RegEx struct {
+	Pattern string
+	Options string
+}
+
+// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
+// will be marshaled as a mapping from identifiers to values that may be
+// used when evaluating the provided Code.
+type JavaScript struct {
+	Code  string
+	Scope interface{}
+}
+
+// DBPointer refers to a document id in a namespace.
+//
+// This type is deprecated in the BSON specification and should not be used
+// except for backwards compatibility with ancient applications.
+type DBPointer struct {
+	Namespace string
+	Id        ObjectId
+}
+
+const initialBufferSize = 64
+
+func handleErr(err *error) {
+	if r := recover(); r != nil {
+		if _, ok := r.(runtime.Error); ok {
+			panic(r)
+		} else if _, ok := r.(externalPanic); ok {
+			panic(r)
+		} else if s, ok := r.(string); ok {
+			*err = errors.New(s)
+		} else if e, ok := r.(error); ok {
+			*err = e
+		} else {
+			panic(r)
+		}
+	}
+}
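
handleErr converts the package's internal panics back into ordinary errors at
the API boundary, while re-raising runtime errors and externalPanic values so
genuine bugs still crash loudly. The pattern in isolation (hypothetical
names):

    // doWork translates its own panics into a returned error, but lets
    // anything it doesn't recognize keep unwinding the stack.
    func doWork() (err error) {
        defer func() {
            if r := recover(); r != nil {
                if e, ok := r.(error); ok {
                    err = e // one of ours: surface it as a return value
                    return
                }
                panic(r) // not ours: re-raise
            }
        }()
        mightPanic() // hypothetical helper that panics with an error value
        return nil
    }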
+
+// Marshal serializes the in value, which may be a map or a struct value.
+// In the case of struct values, only exported fields will be serialized,
+// and the order of serialized fields will match that of the struct itself.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the marshalling behavior for
+// the field. The tag formats accepted are:
+//
+//     "[<key>][,<flag1>[,<flag2>]]"
+//
+//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty  Only include the field if it's not set to the zero
+//                value for the type or to empty slices or maps.
+//
+//     minsize    Marshal an int64 value as an int32, if that's feasible
+//                while preserving the numeric value.
+//
+//     inline     Inline the field, which must be a struct or a map,
+//                causing all of its fields or keys to be processed as if
+//                they were part of the outer struct. For maps, keys must
+//                not conflict with the bson keys of other struct fields.
+//
+// Some examples:
+//
+//     type T struct {
+//         A bool
+//         B int    "myb"
+//         C string "myc,omitempty"
+//         D string `bson:",omitempty" json:"jsonkey"`
+//         E int64  ",minsize"
+//         F int64  "myf,omitempty,minsize"
+//     }
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := &encoder{make([]byte, 0, initialBufferSize)}
+	e.addDoc(reflect.ValueOf(in))
+	return e.out, nil
+}
+
+// Unmarshal deserializes data from in into the out value.  The out value
+// must be a map, a pointer to a struct, or a pointer to a bson.D value.
+// In the case of struct values, only exported fields will be deserialized.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the marshalling behavior for
+// the field. The tag formats accepted are:
+//
+//     "[<key>][,<flag1>[,<flag2>]]"
+//
+//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported during unmarshal (see the
+// Marshal method for other flags):
+//
+//     inline     Inline the field, which must be a struct or a map.
+//                Inlined structs are handled as if its fields were part
+//                of the outer struct. An inlined map causes keys that do
+//                not match any other struct field to be inserted in the
+//                map rather than being discarded as usual.
+//
+// The target field or element types of out may not necessarily match
+// the BSON values of the provided data.  The following conversions are
+// made automatically:
+//
+// - Numeric types are converted if at least the integer part of the
+//   value would be preserved correctly
+// - Bools are converted to numeric types as 1 or 0
+// - Numeric types are converted to bools as true if not 0 or false otherwise
+// - Binary and string BSON data is converted to a string, array or byte slice
+//
+// If the value would not fit the type and cannot be converted, it's
+// silently skipped.
+//
+// Pointer values are initialized when necessary.
+func Unmarshal(in []byte, out interface{}) (err error) {
+	if raw, ok := out.(*Raw); ok {
+		raw.Kind = 3
+		raw.Data = in
+		return nil
+	}
+	defer handleErr(&err)
+	v := reflect.ValueOf(out)
+	switch v.Kind() {
+	case reflect.Ptr:
+		fallthrough
+	case reflect.Map:
+		d := newDecoder(in)
+		d.readDocTo(v)
+	case reflect.Struct:
+		return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
+	default:
+		return errors.New("Unmarshal needs a map or a pointer to a struct.")
+	}
+	return nil
+}
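
A round trip through the two functions, exercising the tag flags documented
above (user code; the field names are illustrative):

    type Person struct {
        Name  string `bson:"name"`
        Email string `bson:"email,omitempty"` // dropped when empty
        Age   int64  `bson:",minsize"`        // stored as int32 when it fits
    }

    func roundTrip() {
        data, err := bson.Marshal(&Person{Name: "Gustavo", Age: 42})
        if err != nil {
            panic(err)
        }
        var out Person
        if err := bson.Unmarshal(data, &out); err != nil { // needs a pointer
            panic(err)
        }
        fmt.Printf("%+v\n", out) // Email was omitted; Age survives the int32 trip
    }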
+
+// Unmarshal deserializes raw into the out value.  If the out value type
+// is not compatible with raw, a *bson.TypeError is returned.
+//
+// See the Unmarshal function documentation for more details on the
+// unmarshalling process.
+func (raw Raw) Unmarshal(out interface{}) (err error) {
+	defer handleErr(&err)
+	v := reflect.ValueOf(out)
+	switch v.Kind() {
+	case reflect.Ptr:
+		v = v.Elem()
+		fallthrough
+	case reflect.Map:
+		d := newDecoder(raw.Data)
+		good := d.readElemTo(v, raw.Kind)
+		if !good {
+			return &TypeError{v.Type(), raw.Kind}
+		}
+	case reflect.Struct:
+		return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
+	default:
+		return errors.New("Raw Unmarshal needs a map or a valid pointer.")
+	}
+	return nil
+}
+
+type TypeError struct {
+	Type reflect.Type
+	Kind byte
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+	InlineMap  int
+	Zero       reflect.Value
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	MinSize   bool
+	Inline    []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var structMapMutex sync.RWMutex
+
+type externalPanic string
+
+func (e externalPanic) String() string {
+	return string(e)
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	structMapMutex.RLock()
+	sinfo, found := structMap[st]
+	structMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("bson")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "minsize":
+					info.MinSize = true
+				case "inline":
+					inline = true
+				default:
+					msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+					panic(externalPanic(msg))
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct:
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+					}
+					fieldsMap[finfo.Key] = finfo
+					fieldsList = append(fieldsList, finfo)
+				}
+			default:
+				panic("Option ,inline needs a struct value or map field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+	sinfo = &structInfo{
+		fieldsMap,
+		fieldsList,
+		inlineMap,
+		reflect.New(st).Elem(),
+	}
+	structMapMutex.Lock()
+	structMap[st] = sinfo
+	structMapMutex.Unlock()
+	return sinfo, nil
+}

The diff is not shown because the file size is too large.
+ 1832 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/bson_test.go


+ 310 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/decimal.go

@@ -0,0 +1,310 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package bson
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Decimal128 holds decimal128 BSON values.
+type Decimal128 struct {
+	h, l uint64
+}
+
+func (d Decimal128) String() string {
+	var pos int     // positive sign
+	var e int       // exponent
+	var h, l uint64 // significand high/low
+
+	if d.h>>63&1 == 0 {
+		pos = 1
+	}
+
+	switch d.h >> 58 & (1<<5 - 1) {
+	case 0x1F:
+		return "NaN"
+	case 0x1E:
+		return "-Inf"[pos:]
+	}
+
+	l = d.l
+	if d.h>>61&3 == 3 {
+		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
+		// Implicit 0b100 prefix in significand.
+		e = int(d.h>>47&(1<<14-1)) - 6176
+		//h = 4<<47 | d.h&(1<<47-1)
+		// Spec says all of these values are out of range.
+		h, l = 0, 0
+	} else {
+		// Bits: 1*sign 14*exponent 113*significand
+		e = int(d.h>>49&(1<<14-1)) - 6176
+		h = d.h & (1<<49 - 1)
+	}
+
+	// Would be handled by the logic below, but that's trivial and common.
+	if h == 0 && l == 0 && e == 0 {
+		return "-0"[pos:]
+	}
+
+	var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
+	var last = len(repr)
+	var i = len(repr)
+	var dot = len(repr) + e
+	var rem uint32
+Loop:
+	for d9 := 0; d9 < 5; d9++ {
+		h, l, rem = divmod(h, l, 1e9)
+		for d1 := 0; d1 < 9; d1++ {
+			// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
+			if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
+				e += len(repr) - i
+				i--
+				repr[i] = '.'
+				last = i - 1
+				dot = len(repr) // Unmark.
+			}
+			c := '0' + byte(rem%10)
+			rem /= 10
+			i--
+			repr[i] = c
+			// Handle "0E+3", "1E+3", etc.
+			if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
+				last = i
+				break Loop
+			}
+			if c != '0' {
+				last = i
+			}
+			// Break early; the loop works without this, but there's
+			// no point continuing once the significand is exhausted.
+			if dot > i && l == 0 && h == 0 && rem == 0 {
+				break Loop
+			}
+		}
+	}
+	repr[last-1] = '-'
+	last--
+
+	if e > 0 {
+		return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
+	}
+	if e < 0 {
+		return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
+	}
+	return string(repr[last+pos:])
+}
+
+func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
+	div64 := uint64(div)
+	a := h >> 32
+	aq := a / div64
+	ar := a % div64
+	b := ar<<32 + h&(1<<32-1)
+	bq := b / div64
+	br := b % div64
+	c := br<<32 + l>>32
+	cq := c / div64
+	cr := c % div64
+	d := cr<<32 + l&(1<<32-1)
+	dq := d / div64
+	dr := d % div64
+	return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
+}
+
+var dNaN = Decimal128{0x1F << 58, 0}
+var dPosInf = Decimal128{0x1E << 58, 0}
+var dNegInf = Decimal128{0x3E << 58, 0}
+
+func dErr(s string) (Decimal128, error) {
+	return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
+}
+
+func ParseDecimal128(s string) (Decimal128, error) {
+	orig := s
+	if s == "" {
+		return dErr(orig)
+	}
+	neg := s[0] == '-'
+	if neg || s[0] == '+' {
+		s = s[1:]
+	}
+
+	if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
+		if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
+			return dNaN, nil
+		}
+		if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
+			if neg {
+				return dNegInf, nil
+			}
+			return dPosInf, nil
+		}
+		return dErr(orig)
+	}
+
+	var h, l uint64
+	var e int
+
+	var add, ovr uint32
+	var mul uint32 = 1
+	var dot = -1
+	var digits = 0
+	var i = 0
+	for i < len(s) {
+		c := s[i]
+		if mul == 1e9 {
+			h, l, ovr = muladd(h, l, mul, add)
+			mul, add = 1, 0
+			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+				return dErr(orig)
+			}
+		}
+		if c >= '0' && c <= '9' {
+			i++
+			if c > '0' || digits > 0 {
+				digits++
+			}
+			if digits > 34 {
+				if c == '0' {
+					// Exact rounding.
+					e++
+					continue
+				}
+				return dErr(orig)
+			}
+			mul *= 10
+			add *= 10
+			add += uint32(c - '0')
+			continue
+		}
+		if c == '.' {
+			i++
+			if dot >= 0 || i == 1 && len(s) == 1 {
+				return dErr(orig)
+			}
+			if i == len(s) {
+				break
+			}
+			if s[i] < '0' || s[i] > '9' || e > 0 {
+				return dErr(orig)
+			}
+			dot = i
+			continue
+		}
+		break
+	}
+	if i == 0 {
+		return dErr(orig)
+	}
+	if mul > 1 {
+		h, l, ovr = muladd(h, l, mul, add)
+		if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+			return dErr(orig)
+		}
+	}
+	if dot >= 0 {
+		e += dot - i
+	}
+	if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
+		i++
+		eneg := s[i] == '-'
+		if eneg || s[i] == '+' {
+			i++
+			if i == len(s) {
+				return dErr(orig)
+			}
+		}
+		n := 0
+		for i < len(s) && n < 1e4 {
+			c := s[i]
+			i++
+			if c < '0' || c > '9' {
+				return dErr(orig)
+			}
+			n *= 10
+			n += int(c - '0')
+		}
+		if eneg {
+			n = -n
+		}
+		e += n
+		for e < -6176 {
+			// Subnormal.
+			var div uint32 = 1
+			for div < 1e9 && e < -6176 {
+				div *= 10
+				e++
+			}
+			var rem uint32
+			h, l, rem = divmod(h, l, div)
+			if rem > 0 {
+				return dErr(orig)
+			}
+		}
+		for e > 6111 {
+			// Clamped.
+			var mul uint32 = 1
+			for mul < 1e9 && e > 6111 {
+				mul *= 10
+				e--
+			}
+			h, l, ovr = muladd(h, l, mul, 0)
+			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+				return dErr(orig)
+			}
+		}
+		if e < -6176 || e > 6111 {
+			return dErr(orig)
+		}
+	}
+
+	if i < len(s) {
+		return dErr(orig)
+	}
+
+	h |= uint64(e+6176) & uint64(1<<14-1) << 49
+	if neg {
+		h |= 1 << 63
+	}
+	return Decimal128{h, l}, nil
+}
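
ParseDecimal128 and String are inverses over canonical inputs; a quick sketch
(exact output strings follow the formatting rules in String above):

    func parseDecimals() {
        for _, s := range []string{"1.5", "-4.00", "3E-7", "NaN"} {
            d, err := bson.ParseDecimal128(s)
            if err != nil {
                fmt.Println(s, "->", err)
                continue
            }
            fmt.Println(s, "->", d)
        }
    }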
+
+func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
+	mul64 := uint64(mul)
+	a := mul64 * (l & (1<<32 - 1))
+	b := a>>32 + mul64*(l>>32)
+	c := b>>32 + mul64*(h&(1<<32-1))
+	d := c>>32 + mul64*(h>>32)
+
+	a = a&(1<<32-1) + uint64(add)
+	b = b&(1<<32-1) + a>>32
+	c = c&(1<<32-1) + b>>32
+	d = d&(1<<32-1) + c>>32
+
+	return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
+}

The diff is not shown because the file size is too large.
+ 4109 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/decimal_test.go


+ 849 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/decode.go

@@ -0,0 +1,849 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+)
+
+type decoder struct {
+	in      []byte
+	i       int
+	docType reflect.Type
+}
+
+var typeM = reflect.TypeOf(M{})
+
+func newDecoder(in []byte) *decoder {
+	return &decoder{in, 0, typeM}
+}
+
+// --------------------------------------------------------------------------
+// Some helper functions.
+
+func corrupted() {
+	panic("Document is corrupted")
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of documents.
+
+const (
+	setterUnknown = iota
+	setterNone
+	setterType
+	setterAddr
+)
+
+var setterStyles map[reflect.Type]int
+var setterIface reflect.Type
+var setterMutex sync.RWMutex
+
+func init() {
+	var iface Setter
+	setterIface = reflect.TypeOf(&iface).Elem()
+	setterStyles = make(map[reflect.Type]int)
+}
+
+func setterStyle(outt reflect.Type) int {
+	setterMutex.RLock()
+	style := setterStyles[outt]
+	setterMutex.RUnlock()
+	if style == setterUnknown {
+		setterMutex.Lock()
+		defer setterMutex.Unlock()
+		if outt.Implements(setterIface) {
+			setterStyles[outt] = setterType
+		} else if reflect.PtrTo(outt).Implements(setterIface) {
+			setterStyles[outt] = setterAddr
+		} else {
+			setterStyles[outt] = setterNone
+		}
+		style = setterStyles[outt]
+	}
+	return style
+}
+
+func getSetter(outt reflect.Type, out reflect.Value) Setter {
+	style := setterStyle(outt)
+	if style == setterNone {
+		return nil
+	}
+	if style == setterAddr {
+		if !out.CanAddr() {
+			return nil
+		}
+		out = out.Addr()
+	} else if outt.Kind() == reflect.Ptr && out.IsNil() {
+		out.Set(reflect.New(outt.Elem()))
+	}
+	return out.Interface().(Setter)
+}
+
+func clearMap(m reflect.Value) {
+	var none reflect.Value
+	for _, k := range m.MapKeys() {
+		m.SetMapIndex(k, none)
+	}
+}
+
+func (d *decoder) readDocTo(out reflect.Value) {
+	var elemType reflect.Type
+	outt := out.Type()
+	outk := outt.Kind()
+
+	for {
+		if outk == reflect.Ptr && out.IsNil() {
+			out.Set(reflect.New(outt.Elem()))
+		}
+		if setter := getSetter(outt, out); setter != nil {
+			var raw Raw
+			d.readDocTo(reflect.ValueOf(&raw))
+			err := setter.SetBSON(raw)
+			if _, ok := err.(*TypeError); err != nil && !ok {
+				panic(err)
+			}
+			return
+		}
+		if outk == reflect.Ptr {
+			out = out.Elem()
+			outt = out.Type()
+			outk = out.Kind()
+			continue
+		}
+		break
+	}
+
+	var fieldsMap map[string]fieldInfo
+	var inlineMap reflect.Value
+	start := d.i
+
+	origout := out
+	if outk == reflect.Interface {
+		if d.docType.Kind() == reflect.Map {
+			mv := reflect.MakeMap(d.docType)
+			out.Set(mv)
+			out = mv
+		} else {
+			dv := reflect.New(d.docType).Elem()
+			out.Set(dv)
+			out = dv
+		}
+		outt = out.Type()
+		outk = outt.Kind()
+	}
+
+	docType := d.docType
+	keyType := typeString
+	convertKey := false
+	switch outk {
+	case reflect.Map:
+		keyType = outt.Key()
+		if keyType.Kind() != reflect.String {
+			panic("BSON map must have string keys. Got: " + outt.String())
+		}
+		if keyType != typeString {
+			convertKey = true
+		}
+		elemType = outt.Elem()
+		if elemType == typeIface {
+			d.docType = outt
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(out.Type()))
+		} else if out.Len() > 0 {
+			clearMap(out)
+		}
+	case reflect.Struct:
+		if outt != typeRaw {
+			sinfo, err := getStructInfo(out.Type())
+			if err != nil {
+				panic(err)
+			}
+			fieldsMap = sinfo.FieldsMap
+			out.Set(sinfo.Zero)
+			if sinfo.InlineMap != -1 {
+				inlineMap = out.Field(sinfo.InlineMap)
+				if !inlineMap.IsNil() && inlineMap.Len() > 0 {
+					clearMap(inlineMap)
+				}
+				elemType = inlineMap.Type().Elem()
+				if elemType == typeIface {
+					d.docType = inlineMap.Type()
+				}
+			}
+		}
+	case reflect.Slice:
+		switch outt.Elem() {
+		case typeDocElem:
+			origout.Set(d.readDocElems(outt))
+			return
+		case typeRawDocElem:
+			origout.Set(d.readRawDocElems(outt))
+			return
+		}
+		fallthrough
+	default:
+		panic("Unsupported document type for unmarshalling: " + out.Type().String())
+	}
+
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		name := d.readCStr()
+		if d.i >= end {
+			corrupted()
+		}
+
+		switch outk {
+		case reflect.Map:
+			e := reflect.New(elemType).Elem()
+			if d.readElemTo(e, kind) {
+				k := reflect.ValueOf(name)
+				if convertKey {
+					k = k.Convert(keyType)
+				}
+				out.SetMapIndex(k, e)
+			}
+		case reflect.Struct:
+			if outt == typeRaw {
+				d.dropElem(kind)
+			} else {
+				if info, ok := fieldsMap[name]; ok {
+					if info.Inline == nil {
+						d.readElemTo(out.Field(info.Num), kind)
+					} else {
+						d.readElemTo(out.FieldByIndex(info.Inline), kind)
+					}
+				} else if inlineMap.IsValid() {
+					if inlineMap.IsNil() {
+						inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+					}
+					e := reflect.New(elemType).Elem()
+					if d.readElemTo(e, kind) {
+						inlineMap.SetMapIndex(reflect.ValueOf(name), e)
+					}
+				} else {
+					d.dropElem(kind)
+				}
+			}
+		case reflect.Slice:
+		}
+
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+	d.docType = docType
+
+	if outt == typeRaw {
+		out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
+	}
+}
+
+func (d *decoder) readArrayDocTo(out reflect.Value) {
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	i := 0
+	l := out.Len()
+	for d.in[d.i] != '\x00' {
+		if i >= l {
+			panic("Length mismatch on array field")
+		}
+		kind := d.readByte()
+		for d.i < end && d.in[d.i] != '\x00' {
+			d.i++
+		}
+		if d.i >= end {
+			corrupted()
+		}
+		d.i++
+		d.readElemTo(out.Index(i), kind)
+		if d.i >= end {
+			corrupted()
+		}
+		i++
+	}
+	if i != l {
+		panic("Length mismatch on array field")
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+}
+
+func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
+	tmp := make([]reflect.Value, 0, 8)
+	elemType := t.Elem()
+	if elemType == typeRawDocElem {
+		d.dropElem(0x04)
+		return reflect.Zero(t).Interface()
+	}
+
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		for d.i < end && d.in[d.i] != '\x00' {
+			d.i++
+		}
+		if d.i >= end {
+			corrupted()
+		}
+		d.i++
+		e := reflect.New(elemType).Elem()
+		if d.readElemTo(e, kind) {
+			tmp = append(tmp, e)
+		}
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+
+	n := len(tmp)
+	slice := reflect.MakeSlice(t, n, n)
+	for i := 0; i != n; i++ {
+		slice.Index(i).Set(tmp[i])
+	}
+	return slice.Interface()
+}
+
+var typeSlice = reflect.TypeOf([]interface{}{})
+var typeIface = typeSlice.Elem()
+
+func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
+	docType := d.docType
+	d.docType = typ
+	slice := make([]DocElem, 0, 8)
+	d.readDocWith(func(kind byte, name string) {
+		e := DocElem{Name: name}
+		v := reflect.ValueOf(&e.Value)
+		if d.readElemTo(v.Elem(), kind) {
+			slice = append(slice, e)
+		}
+	})
+	slicev := reflect.New(typ).Elem()
+	slicev.Set(reflect.ValueOf(slice))
+	d.docType = docType
+	return slicev
+}
+
+func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
+	docType := d.docType
+	d.docType = typ
+	slice := make([]RawDocElem, 0, 8)
+	d.readDocWith(func(kind byte, name string) {
+		e := RawDocElem{Name: name}
+		v := reflect.ValueOf(&e.Value)
+		if d.readElemTo(v.Elem(), kind) {
+			slice = append(slice, e)
+		}
+	})
+	slicev := reflect.New(typ).Elem()
+	slicev.Set(reflect.ValueOf(slice))
+	d.docType = docType
+	return slicev
+}
+
+func (d *decoder) readDocWith(f func(kind byte, name string)) {
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		name := d.readCStr()
+		if d.i >= end {
+			corrupted()
+		}
+		f(kind, name)
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of individual elements within a document.
+
+var blackHole = settableValueOf(struct{}{})
+
+func (d *decoder) dropElem(kind byte) {
+	d.readElemTo(blackHole, kind)
+}
+
+// Attempt to decode an element from the document and put it into out.
+// If the types are not compatible, the returned ok value will be
+// false and out will be unchanged.
+func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
+
+	start := d.i
+
+	if kind == 0x03 {
+		// Delegate unmarshaling of documents.
+		outt := out.Type()
+		outk := out.Kind()
+		switch outk {
+		case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
+			d.readDocTo(out)
+			return true
+		}
+		if setterStyle(outt) != setterNone {
+			d.readDocTo(out)
+			return true
+		}
+		if outk == reflect.Slice {
+			switch outt.Elem() {
+			case typeDocElem:
+				out.Set(d.readDocElems(outt))
+			case typeRawDocElem:
+				out.Set(d.readRawDocElems(outt))
+			default:
+				d.readDocTo(blackHole)
+			}
+			return true
+		}
+		d.readDocTo(blackHole)
+		return true
+	}
+
+	var in interface{}
+
+	switch kind {
+	case 0x01: // Float64
+		in = d.readFloat64()
+	case 0x02: // UTF-8 string
+		in = d.readStr()
+	case 0x03: // Document
+		panic("Can't happen. Handled above.")
+	case 0x04: // Array
+		outt := out.Type()
+		if setterStyle(outt) != setterNone {
+			// Skip the value so its data is handed to the setter below.
+			d.dropElem(kind)
+			break
+		}
+		for outt.Kind() == reflect.Ptr {
+			outt = outt.Elem()
+		}
+		switch outt.Kind() {
+		case reflect.Array:
+			d.readArrayDocTo(out)
+			return true
+		case reflect.Slice:
+			in = d.readSliceDoc(outt)
+		default:
+			in = d.readSliceDoc(typeSlice)
+		}
+	case 0x05: // Binary
+		b := d.readBinary()
+		if b.Kind == 0x00 || b.Kind == 0x02 {
+			in = b.Data
+		} else {
+			in = b
+		}
+	case 0x06: // Undefined (obsolete, but still seen in the wild)
+		in = Undefined
+	case 0x07: // ObjectId
+		in = ObjectId(d.readBytes(12))
+	case 0x08: // Bool
+		in = d.readBool()
+	case 0x09: // UTC datetime
+		// MongoDB represents datetimes as milliseconds since the Unix epoch.
+		i := d.readInt64()
+		if i == -62135596800000 {
+			in = time.Time{} // In UTC for convenience.
+		} else {
+			in = time.Unix(i/1e3, i%1e3*1e6)
+		}
+	case 0x0A: // Nil
+		in = nil
+	case 0x0B: // RegEx
+		in = d.readRegEx()
+	case 0x0C:
+		in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
+	case 0x0D: // JavaScript without scope
+		in = JavaScript{Code: d.readStr()}
+	case 0x0E: // Symbol
+		in = Symbol(d.readStr())
+	case 0x0F: // JavaScript with scope
+		d.i += 4 // Skip length
+		js := JavaScript{d.readStr(), make(M)}
+		d.readDocTo(reflect.ValueOf(js.Scope))
+		in = js
+	case 0x10: // Int32
+		in = int(d.readInt32())
+	case 0x11: // Mongo-specific timestamp
+		in = MongoTimestamp(d.readInt64())
+	case 0x12: // Int64
+		in = d.readInt64()
+	case 0x13: // Decimal128
+		in = Decimal128{
+			l: uint64(d.readInt64()),
+			h: uint64(d.readInt64()),
+		}
+	case 0x7F: // Max key
+		in = MaxKey
+	case 0xFF: // Min key
+		in = MinKey
+	default:
+		panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
+	}
+
+	outt := out.Type()
+
+	if outt == typeRaw {
+		out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
+		return true
+	}
+
+	if setter := getSetter(outt, out); setter != nil {
+		err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
+		if err == SetZero {
+			out.Set(reflect.Zero(outt))
+			return true
+		}
+		if err == nil {
+			return true
+		}
+		if _, ok := err.(*TypeError); !ok {
+			panic(err)
+		}
+		return false
+	}
+
+	if in == nil {
+		out.Set(reflect.Zero(outt))
+		return true
+	}
+
+	outk := outt.Kind()
+
+	// Dereference and initialize pointer if necessary.
+	first := true
+	for outk == reflect.Ptr {
+		if !out.IsNil() {
+			out = out.Elem()
+		} else {
+			elem := reflect.New(outt.Elem())
+			if first {
+				// Only set if value is compatible.
+				first = false
+				defer func(out, elem reflect.Value) {
+					if good {
+						out.Set(elem)
+					}
+				}(out, elem)
+			} else {
+				out.Set(elem)
+			}
+			out = elem
+		}
+		outt = out.Type()
+		outk = outt.Kind()
+	}
+
+	inv := reflect.ValueOf(in)
+	if outt == inv.Type() {
+		out.Set(inv)
+		return true
+	}
+
+	switch outk {
+	case reflect.Interface:
+		out.Set(inv)
+		return true
+	case reflect.String:
+		switch inv.Kind() {
+		case reflect.String:
+			out.SetString(inv.String())
+			return true
+		case reflect.Slice:
+			if b, ok := in.([]byte); ok {
+				out.SetString(string(b))
+				return true
+			}
+		case reflect.Int, reflect.Int64:
+			if outt == typeJSONNumber {
+				out.SetString(strconv.FormatInt(inv.Int(), 10))
+				return true
+			}
+		case reflect.Float64:
+			if outt == typeJSONNumber {
+				out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
+				return true
+			}
+		}
+	case reflect.Slice, reflect.Array:
+		// Remember, array (0x04) slices are built with the correct
+		// element type. If we are here, it must be a cross-BSON-kind
+		// conversion (e.g. a 0x02 string decoded into a []byte field).
+		if outt.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		switch inv.Kind() {
+		case reflect.String:
+			slice := []byte(inv.String())
+			out.Set(reflect.ValueOf(slice))
+			return true
+		case reflect.Slice:
+			switch outt.Kind() {
+			case reflect.Array:
+				reflect.Copy(out, inv)
+			case reflect.Slice:
+				out.SetBytes(inv.Bytes())
+			}
+			return true
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch inv.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetInt(inv.Int())
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetInt(int64(inv.Float()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetInt(1)
+			} else {
+				out.SetInt(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("can't happen: no uint types in BSON (!?)")
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch inv.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetUint(uint64(inv.Int()))
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetUint(uint64(inv.Float()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetUint(1)
+			} else {
+				out.SetUint(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON.")
+		}
+	case reflect.Float32, reflect.Float64:
+		switch inv.Kind() {
+		case reflect.Float32, reflect.Float64:
+			out.SetFloat(inv.Float())
+			return true
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetFloat(float64(inv.Int()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetFloat(1)
+			} else {
+				out.SetFloat(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON?")
+		}
+	case reflect.Bool:
+		switch inv.Kind() {
+		case reflect.Bool:
+			out.SetBool(inv.Bool())
+			return true
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetBool(inv.Int() != 0)
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetBool(inv.Float() != 0)
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON?")
+		}
+	case reflect.Struct:
+		if outt == typeURL && inv.Kind() == reflect.String {
+			u, err := url.Parse(inv.String())
+			if err != nil {
+				panic(err)
+			}
+			out.Set(reflect.ValueOf(u).Elem())
+			return true
+		}
+		if outt == typeBinary {
+			if b, ok := in.([]byte); ok {
+				out.Set(reflect.ValueOf(Binary{Data: b}))
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// --------------------------------------------------------------------------
+// Parsers of basic types.
+
+func (d *decoder) readRegEx() RegEx {
+	re := RegEx{}
+	re.Pattern = d.readCStr()
+	re.Options = d.readCStr()
+	return re
+}
+
+func (d *decoder) readBinary() Binary {
+	l := d.readInt32()
+	b := Binary{}
+	b.Kind = d.readByte()
+	b.Data = d.readBytes(l)
+	if b.Kind == 0x02 && len(b.Data) >= 4 {
+		// Weird obsolete format with redundant length.
+		b.Data = b.Data[4:]
+	}
+	return b
+}
+
+func (d *decoder) readStr() string {
+	l := d.readInt32()
+	b := d.readBytes(l - 1)
+	if d.readByte() != '\x00' {
+		corrupted()
+	}
+	return string(b)
+}
+
+func (d *decoder) readCStr() string {
+	start := d.i
+	end := start
+	l := len(d.in)
+	for ; end != l; end++ {
+		if d.in[end] == '\x00' {
+			break
+		}
+	}
+	d.i = end + 1
+	if d.i > l {
+		corrupted()
+	}
+	return string(d.in[start:end])
+}
+
+func (d *decoder) readBool() bool {
+	b := d.readByte()
+	if b == 0 {
+		return false
+	}
+	if b == 1 {
+		return true
+	}
+	panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
+}
+
+func (d *decoder) readFloat64() float64 {
+	return math.Float64frombits(uint64(d.readInt64()))
+}
+
+func (d *decoder) readInt32() int32 {
+	b := d.readBytes(4)
+	return int32((uint32(b[0]) << 0) |
+		(uint32(b[1]) << 8) |
+		(uint32(b[2]) << 16) |
+		(uint32(b[3]) << 24))
+}
+
+func (d *decoder) readInt64() int64 {
+	b := d.readBytes(8)
+	return int64((uint64(b[0]) << 0) |
+		(uint64(b[1]) << 8) |
+		(uint64(b[2]) << 16) |
+		(uint64(b[3]) << 24) |
+		(uint64(b[4]) << 32) |
+		(uint64(b[5]) << 40) |
+		(uint64(b[6]) << 48) |
+		(uint64(b[7]) << 56))
+}
+
+func (d *decoder) readByte() byte {
+	i := d.i
+	d.i++
+	if d.i > len(d.in) {
+		corrupted()
+	}
+	return d.in[i]
+}
+
+func (d *decoder) readBytes(length int32) []byte {
+	if length < 0 {
+		corrupted()
+	}
+	start := d.i
+	d.i += int(length)
+	if d.i < start || d.i > len(d.in) {
+		corrupted()
+	}
+	return d.in[start : start+int(length)]
+}

+ 514 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/encode.go

@@ -0,0 +1,514 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+// --------------------------------------------------------------------------
+// Some internal infrastructure.
+
+var (
+	typeBinary         = reflect.TypeOf(Binary{})
+	typeObjectId       = reflect.TypeOf(ObjectId(""))
+	typeDBPointer      = reflect.TypeOf(DBPointer{"", ObjectId("")})
+	typeSymbol         = reflect.TypeOf(Symbol(""))
+	typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
+	typeOrderKey       = reflect.TypeOf(MinKey)
+	typeDocElem        = reflect.TypeOf(DocElem{})
+	typeRawDocElem     = reflect.TypeOf(RawDocElem{})
+	typeRaw            = reflect.TypeOf(Raw{})
+	typeURL            = reflect.TypeOf(url.URL{})
+	typeTime           = reflect.TypeOf(time.Time{})
+	typeString         = reflect.TypeOf("")
+	typeJSONNumber     = reflect.TypeOf(json.Number(""))
+)
+
+const itoaCacheSize = 32
+
+var itoaCache []string
+
+func init() {
+	itoaCache = make([]string, itoaCacheSize)
+	for i := 0; i != itoaCacheSize; i++ {
+		itoaCache[i] = strconv.Itoa(i)
+	}
+}
+
+func itoa(i int) string {
+	if i < itoaCacheSize {
+		return itoaCache[i]
+	}
+	return strconv.Itoa(i)
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of the document value itself.
+
+type encoder struct {
+	out []byte
+}
+
+func (e *encoder) addDoc(v reflect.Value) {
+	for {
+		if vi, ok := v.Interface().(Getter); ok {
+			getv, err := vi.GetBSON()
+			if err != nil {
+				panic(err)
+			}
+			v = reflect.ValueOf(getv)
+			continue
+		}
+		if v.Kind() == reflect.Ptr {
+			v = v.Elem()
+			continue
+		}
+		break
+	}
+
+	if v.Type() == typeRaw {
+		raw := v.Interface().(Raw)
+		if raw.Kind != 0x03 && raw.Kind != 0x00 {
+			panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
+		}
+		if len(raw.Data) == 0 {
+			panic("Attempted to marshal empty Raw document")
+		}
+		e.addBytes(raw.Data...)
+		return
+	}
+
+	start := e.reserveInt32()
+
+	switch v.Kind() {
+	case reflect.Map:
+		e.addMap(v)
+	case reflect.Struct:
+		e.addStruct(v)
+	case reflect.Array, reflect.Slice:
+		e.addSlice(v)
+	default:
+		panic("Can't marshal " + v.Type().String() + " as a BSON document")
+	}
+
+	e.addBytes(0)
+	e.setInt32(start, int32(len(e.out)-start))
+}
+
+func (e *encoder) addMap(v reflect.Value) {
+	for _, k := range v.MapKeys() {
+		e.addElem(k.String(), v.MapIndex(k), false)
+	}
+}
+
+func (e *encoder) addStruct(v reflect.Value) {
+	sinfo, err := getStructInfo(v.Type())
+	if err != nil {
+		panic(err)
+	}
+	var value reflect.Value
+	if sinfo.InlineMap >= 0 {
+		m := v.Field(sinfo.InlineMap)
+		if m.Len() > 0 {
+			for _, k := range m.MapKeys() {
+				ks := k.String()
+				if _, found := sinfo.FieldsMap[ks]; found {
+					panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
+				}
+				e.addElem(ks, m.MapIndex(k), false)
+			}
+		}
+	}
+	for _, info := range sinfo.FieldsList {
+		if info.Inline == nil {
+			value = v.Field(info.Num)
+		} else {
+			value = v.FieldByIndex(info.Inline)
+		}
+		if info.OmitEmpty && isZero(value) {
+			continue
+		}
+		e.addElem(info.Key, value, info.MinSize)
+	}
+}
+
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Ptr, reflect.Interface:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		if vt == typeTime {
+			return v.Interface().(time.Time).IsZero()
+		}
+		for i := 0; i < v.NumField(); i++ {
+			if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+func (e *encoder) addSlice(v reflect.Value) {
+	vi := v.Interface()
+	if d, ok := vi.(D); ok {
+		for _, elem := range d {
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	if d, ok := vi.(RawD); ok {
+		for _, elem := range d {
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	l := v.Len()
+	et := v.Type().Elem()
+	if et == typeDocElem {
+		for i := 0; i < l; i++ {
+			elem := v.Index(i).Interface().(DocElem)
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	if et == typeRawDocElem {
+		for i := 0; i < l; i++ {
+			elem := v.Index(i).Interface().(RawDocElem)
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	for i := 0; i < l; i++ {
+		e.addElem(itoa(i), v.Index(i), false)
+	}
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of elements in a document.
+
+func (e *encoder) addElemName(kind byte, name string) {
+	e.addBytes(kind)
+	e.addBytes([]byte(name)...)
+	e.addBytes(0)
+}
+
+func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
+
+	if !v.IsValid() {
+		e.addElemName(0x0A, name)
+		return
+	}
+
+	if getter, ok := v.Interface().(Getter); ok {
+		getv, err := getter.GetBSON()
+		if err != nil {
+			panic(err)
+		}
+		e.addElem(name, reflect.ValueOf(getv), minSize)
+		return
+	}
+
+	switch v.Kind() {
+
+	case reflect.Interface:
+		e.addElem(name, v.Elem(), minSize)
+
+	case reflect.Ptr:
+		e.addElem(name, v.Elem(), minSize)
+
+	case reflect.String:
+		s := v.String()
+		switch v.Type() {
+		case typeObjectId:
+			if len(s) != 12 {
+				panic("ObjectIDs must be exactly 12 bytes long (got " +
+					strconv.Itoa(len(s)) + ")")
+			}
+			e.addElemName(0x07, name)
+			e.addBytes([]byte(s)...)
+		case typeSymbol:
+			e.addElemName(0x0E, name)
+			e.addStr(s)
+		case typeJSONNumber:
+			n := v.Interface().(json.Number)
+			if i, err := n.Int64(); err == nil {
+				e.addElemName(0x12, name)
+				e.addInt64(i)
+			} else if f, err := n.Float64(); err == nil {
+				e.addElemName(0x01, name)
+				e.addFloat64(f)
+			} else {
+				panic("failed to convert json.Number to a number: " + s)
+			}
+		default:
+			e.addElemName(0x02, name)
+			e.addStr(s)
+		}
+
+	case reflect.Float32, reflect.Float64:
+		e.addElemName(0x01, name)
+		e.addFloat64(v.Float())
+
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		u := v.Uint()
+		if int64(u) < 0 {
+			panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
+		} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
+			e.addElemName(0x10, name)
+			e.addInt32(int32(u))
+		} else {
+			e.addElemName(0x12, name)
+			e.addInt64(int64(u))
+		}
+
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch v.Type() {
+		case typeMongoTimestamp:
+			e.addElemName(0x11, name)
+			e.addInt64(v.Int())
+
+		case typeOrderKey:
+			if v.Int() == int64(MaxKey) {
+				e.addElemName(0x7F, name)
+			} else {
+				e.addElemName(0xFF, name)
+			}
+
+		default:
+			i := v.Int()
+			if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
+				// It fits into an int32, encode as such.
+				e.addElemName(0x10, name)
+				e.addInt32(int32(i))
+			} else {
+				e.addElemName(0x12, name)
+				e.addInt64(i)
+			}
+		}
+
+	case reflect.Bool:
+		e.addElemName(0x08, name)
+		if v.Bool() {
+			e.addBytes(1)
+		} else {
+			e.addBytes(0)
+		}
+
+	case reflect.Map:
+		e.addElemName(0x03, name)
+		e.addDoc(v)
+
+	case reflect.Slice:
+		vt := v.Type()
+		et := vt.Elem()
+		if et.Kind() == reflect.Uint8 {
+			e.addElemName(0x05, name)
+			e.addBinary(0x00, v.Bytes())
+		} else if et == typeDocElem || et == typeRawDocElem {
+			e.addElemName(0x03, name)
+			e.addDoc(v)
+		} else {
+			e.addElemName(0x04, name)
+			e.addDoc(v)
+		}
+
+	case reflect.Array:
+		et := v.Type().Elem()
+		if et.Kind() == reflect.Uint8 {
+			e.addElemName(0x05, name)
+			if v.CanAddr() {
+				e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte))
+			} else {
+				n := v.Len()
+				e.addInt32(int32(n))
+				e.addBytes(0x00)
+				for i := 0; i < n; i++ {
+					el := v.Index(i)
+					e.addBytes(byte(el.Uint()))
+				}
+			}
+		} else {
+			e.addElemName(0x04, name)
+			e.addDoc(v)
+		}
+
+	case reflect.Struct:
+		switch s := v.Interface().(type) {
+
+		case Raw:
+			kind := s.Kind
+			if kind == 0x00 {
+				kind = 0x03
+			}
+			if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
+				panic("Attempted to marshal empty Raw document")
+			}
+			e.addElemName(kind, name)
+			e.addBytes(s.Data...)
+
+		case Binary:
+			e.addElemName(0x05, name)
+			e.addBinary(s.Kind, s.Data)
+
+		case Decimal128:
+			e.addElemName(0x13, name)
+			e.addInt64(int64(s.l))
+			e.addInt64(int64(s.h))
+
+		case DBPointer:
+			e.addElemName(0x0C, name)
+			e.addStr(s.Namespace)
+			if len(s.Id) != 12 {
+				panic("ObjectIDs must be exactly 12 bytes long (got " +
+					strconv.Itoa(len(s.Id)) + ")")
+			}
+			e.addBytes([]byte(s.Id)...)
+
+		case RegEx:
+			e.addElemName(0x0B, name)
+			e.addCStr(s.Pattern)
+			e.addCStr(s.Options)
+
+		case JavaScript:
+			if s.Scope == nil {
+				e.addElemName(0x0D, name)
+				e.addStr(s.Code)
+			} else {
+				e.addElemName(0x0F, name)
+				start := e.reserveInt32()
+				e.addStr(s.Code)
+				e.addDoc(reflect.ValueOf(s.Scope))
+				e.setInt32(start, int32(len(e.out)-start))
+			}
+
+		case time.Time:
+			// MongoDB represents datetimes (0x09) as milliseconds since the Unix epoch.
+			e.addElemName(0x09, name)
+			e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
+
+		case url.URL:
+			e.addElemName(0x02, name)
+			e.addStr(s.String())
+
+		case undefined:
+			e.addElemName(0x06, name)
+
+		default:
+			e.addElemName(0x03, name)
+			e.addDoc(v)
+		}
+
+	default:
+		panic("Can't marshal " + v.Type().String() + " in a BSON document")
+	}
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of base types.
+
+func (e *encoder) addBinary(subtype byte, v []byte) {
+	if subtype == 0x02 {
+		// Wonder how that brilliant idea came to life. Obsolete, luckily.
+		e.addInt32(int32(len(v) + 4))
+		e.addBytes(subtype)
+		e.addInt32(int32(len(v)))
+	} else {
+		e.addInt32(int32(len(v)))
+		e.addBytes(subtype)
+	}
+	e.addBytes(v...)
+}
+
+func (e *encoder) addStr(v string) {
+	e.addInt32(int32(len(v) + 1))
+	e.addCStr(v)
+}
+
+func (e *encoder) addCStr(v string) {
+	e.addBytes([]byte(v)...)
+	e.addBytes(0)
+}
+
+func (e *encoder) reserveInt32() (pos int) {
+	pos = len(e.out)
+	e.addBytes(0, 0, 0, 0)
+	return pos
+}
+
+func (e *encoder) setInt32(pos int, v int32) {
+	e.out[pos+0] = byte(v)
+	e.out[pos+1] = byte(v >> 8)
+	e.out[pos+2] = byte(v >> 16)
+	e.out[pos+3] = byte(v >> 24)
+}
+
+func (e *encoder) addInt32(v int32) {
+	u := uint32(v)
+	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
+}
+
+func (e *encoder) addInt64(v int64) {
+	u := uint64(v)
+	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
+		byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
+}
+
+func (e *encoder) addFloat64(v float64) {
+	e.addInt64(int64(math.Float64bits(v)))
+}
+
+func (e *encoder) addBytes(v ...byte) {
+	e.out = append(e.out, v...)
+}

+ 380 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/json.go

@@ -0,0 +1,380 @@
+package bson
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"gopkg.in/mgo.v2-unstable/internal/json"
+	"strconv"
+	"time"
+)
+
+// UnmarshalJSON unmarshals a JSON value that may hold non-standard
+// syntax as defined in BSON's extended JSON specification.
+func UnmarshalJSON(data []byte, value interface{}) error {
+	d := json.NewDecoder(bytes.NewBuffer(data))
+	d.Extend(&jsonExt)
+	return d.Decode(value)
+}
+
+// MarshalJSON marshals a JSON value that may hold non-standard
+// syntax as defined in BSON's extended JSON specification.
+func MarshalJSON(value interface{}) ([]byte, error) {
+	var buf bytes.Buffer
+	e := json.NewEncoder(&buf)
+	e.Extend(&jsonExt)
+	err := e.Encode(value)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
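+
+// A hedged usage sketch of the two helpers above (the ObjectId hex is an
+// arbitrary illustrative value, not taken from this file): shell-style
+// function syntax decodes, and the value encodes back as strict
+// extended JSON:
+//
+//     var v interface{}
+//     err := bson.UnmarshalJSON([]byte(`{"_id": ObjectId("0123456789abcdef01234567")}`), &v)
+//     data, err := bson.MarshalJSON(v) // {"_id":{"$oid":"0123456789abcdef01234567"}}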
+
+// jdec is used internally by the JSON decoding functions
+// so they can unmarshal function-call syntax (e.g. ObjectId(...))
+// without recursing endlessly through the keyed-object decoders.
+func jdec(data []byte, value interface{}) error {
+	d := json.NewDecoder(bytes.NewBuffer(data))
+	d.Extend(&funcExt)
+	return d.Decode(value)
+}
+
+var jsonExt json.Extension
+var funcExt json.Extension
+
+// TODO
+// - Shell regular expressions ("/regexp/opts")
+
+func init() {
+	jsonExt.DecodeUnquotedKeys(true)
+	jsonExt.DecodeTrailingCommas(true)
+
+	funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary")
+	jsonExt.DecodeKeyed("$binary", jdecBinary)
+	jsonExt.DecodeKeyed("$binaryFunc", jdecBinary)
+	jsonExt.EncodeType([]byte(nil), jencBinarySlice)
+	jsonExt.EncodeType(Binary{}, jencBinaryType)
+
+	funcExt.DecodeFunc("ISODate", "$dateFunc", "S")
+	funcExt.DecodeFunc("new Date", "$dateFunc", "S")
+	jsonExt.DecodeKeyed("$date", jdecDate)
+	jsonExt.DecodeKeyed("$dateFunc", jdecDate)
+	jsonExt.EncodeType(time.Time{}, jencDate)
+
+	funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i")
+	jsonExt.DecodeKeyed("$timestamp", jdecTimestamp)
+	jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp)
+
+	funcExt.DecodeConst("undefined", Undefined)
+
+	jsonExt.DecodeKeyed("$regex", jdecRegEx)
+	jsonExt.EncodeType(RegEx{}, jencRegEx)
+
+	funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id")
+	jsonExt.DecodeKeyed("$oid", jdecObjectId)
+	jsonExt.DecodeKeyed("$oidFunc", jdecObjectId)
+	jsonExt.EncodeType(ObjectId(""), jencObjectId)
+
+	funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id")
+	jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef)
+
+	funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N")
+	jsonExt.DecodeKeyed("$numberLong", jdecNumberLong)
+	jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong)
+	jsonExt.EncodeType(int64(0), jencNumberLong)
+	jsonExt.EncodeType(int(0), jencInt)
+
+	funcExt.DecodeConst("MinKey", MinKey)
+	funcExt.DecodeConst("MaxKey", MaxKey)
+	jsonExt.DecodeKeyed("$minKey", jdecMinKey)
+	jsonExt.DecodeKeyed("$maxKey", jdecMaxKey)
+	jsonExt.EncodeType(orderKey(0), jencMinMaxKey)
+
+	jsonExt.DecodeKeyed("$undefined", jdecUndefined)
+	jsonExt.EncodeType(Undefined, jencUndefined)
+
+	jsonExt.Extend(&funcExt)
+}
+
+func fbytes(format string, args ...interface{}) []byte {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, format, args...)
+	return buf.Bytes()
+}
+
+func jdecBinary(data []byte) (interface{}, error) {
+	var v struct {
+		Binary []byte `json:"$binary"`
+		Type   string `json:"$type"`
+		Func   struct {
+			Binary []byte `json:"$binary"`
+			Type   int64  `json:"$type"`
+		} `json:"$binaryFunc"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+
+	var binData []byte
+	var binKind int64
+	if v.Type == "" && v.Binary == nil {
+		binData = v.Func.Binary
+		binKind = v.Func.Type
+	} else if v.Type == "" {
+		return v.Binary, nil
+	} else {
+		binData = v.Binary
+		binKind, err = strconv.ParseInt(v.Type, 0, 64)
+		if err != nil {
+			binKind = -1
+		}
+	}
+
+	if binKind == 0 {
+		return binData, nil
+	}
+	if binKind < 0 || binKind > 255 {
+		return nil, fmt.Errorf("invalid type in binary object: %s", data)
+	}
+
+	return Binary{Kind: byte(binKind), Data: binData}, nil
+}
+
+func jencBinarySlice(v interface{}) ([]byte, error) {
+	in := v.([]byte)
+	out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
+	base64.StdEncoding.Encode(out, in)
+	return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil
+}
+
+func jencBinaryType(v interface{}) ([]byte, error) {
+	in := v.(Binary)
+	out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data)))
+	base64.StdEncoding.Encode(out, in.Data)
+	return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil
+}
+
+const jdateFormat = "2006-01-02T15:04:05.999Z"
+
+func jdecDate(data []byte) (interface{}, error) {
+	var v struct {
+		S    string `json:"$date"`
+		Func struct {
+			S string
+		} `json:"$dateFunc"`
+	}
+	_ = jdec(data, &v)
+	if v.S == "" {
+		v.S = v.Func.S
+	}
+	if v.S != "" {
+		for _, format := range []string{jdateFormat, "2006-01-02"} {
+			t, err := time.Parse(format, v.S)
+			if err == nil {
+				return t, nil
+			}
+		}
+		return nil, fmt.Errorf("cannot parse date: %q", v.S)
+	}
+
+	var vn struct {
+		Date struct {
+			N int64 `json:"$numberLong,string"`
+		} `json:"$date"`
+		Func struct {
+			S int64
+		} `json:"$dateFunc"`
+	}
+	err := jdec(data, &vn)
+	if err != nil {
+		return nil, fmt.Errorf("cannot parse date: %q", data)
+	}
+	n := vn.Date.N
+	if n == 0 {
+		n = vn.Func.S
+	}
+	return time.Unix(n/1000, n%1000*1e6).UTC(), nil
+}
+
+func jencDate(v interface{}) ([]byte, error) {
+	t := v.(time.Time)
+	return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil
+}
+
+func jdecTimestamp(data []byte) (interface{}, error) {
+	var v struct {
+		Func struct {
+			T int32 `json:"t"`
+			I int32 `json:"i"`
+		} `json:"$timestamp"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil
+}
+
+func jencTimestamp(v interface{}) ([]byte, error) {
+	ts := uint64(v.(MongoTimestamp))
+	return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil
+}
+
+func jdecRegEx(data []byte) (interface{}, error) {
+	var v struct {
+		Regex   string `json:"$regex"`
+		Options string `json:"$options"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	return RegEx{v.Regex, v.Options}, nil
+}
+
+func jencRegEx(v interface{}) ([]byte, error) {
+	re := v.(RegEx)
+	type regex struct {
+		Regex   string `json:"$regex"`
+		Options string `json:"$options"`
+	}
+	return json.Marshal(regex{re.Pattern, re.Options})
+}
+
+func jdecObjectId(data []byte) (interface{}, error) {
+	var v struct {
+		Id   string `json:"$oid"`
+		Func struct {
+			Id string
+		} `json:"$oidFunc"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if v.Id == "" {
+		v.Id = v.Func.Id
+	}
+	return ObjectIdHex(v.Id), nil
+}
+
+func jencObjectId(v interface{}) ([]byte, error) {
+	return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil
+}
+
+func jdecDBRef(data []byte) (interface{}, error) {
+	// TODO Support unmarshaling $ref and $id into the input value.
+	var v struct {
+		Obj map[string]interface{} `json:"$dbrefFunc"`
+	}
+	// TODO Fix this. Must not be required.
+	v.Obj = make(map[string]interface{})
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	return v.Obj, nil
+}
+
+func jdecNumberLong(data []byte) (interface{}, error) {
+	var v struct {
+		N    int64 `json:"$numberLong,string"`
+		Func struct {
+			N int64 `json:",string"`
+		} `json:"$numberLongFunc"`
+	}
+	var vn struct {
+		N    int64 `json:"$numberLong"`
+		Func struct {
+			N int64
+		} `json:"$numberLongFunc"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		err = jdec(data, &vn)
+		v.N = vn.N
+		v.Func.N = vn.Func.N
+	}
+	if err != nil {
+		return nil, err
+	}
+	if v.N != 0 {
+		return v.N, nil
+	}
+	return v.Func.N, nil
+}
+
+func jencNumberLong(v interface{}) ([]byte, error) {
+	n := v.(int64)
+	f := `{"$numberLong":"%d"}`
+	if n <= 1<<53 {
+		f = `{"$numberLong":%d}`
+	}
+	return fbytes(f, n), nil
+}
+
+func jencInt(v interface{}) ([]byte, error) {
+	n := v.(int)
+	f := `{"$numberLong":"%d"}`
+	if n <= 1<<53 {
+		f = `%d`
+	}
+	return fbytes(f, n), nil
+}
+
+func jdecMinKey(data []byte) (interface{}, error) {
+	var v struct {
+		N int64 `json:"$minKey"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if v.N != 1 {
+		return nil, fmt.Errorf("invalid $minKey object: %s", data)
+	}
+	return MinKey, nil
+}
+
+func jdecMaxKey(data []byte) (interface{}, error) {
+	var v struct {
+		N int64 `json:"$maxKey"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if v.N != 1 {
+		return nil, fmt.Errorf("invalid $maxKey object: %s", data)
+	}
+	return MaxKey, nil
+}
+
+func jencMinMaxKey(v interface{}) ([]byte, error) {
+	switch v.(orderKey) {
+	case MinKey:
+		return []byte(`{"$minKey":1}`), nil
+	case MaxKey:
+		return []byte(`{"$maxKey":1}`), nil
+	}
+	panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v))
+}
+
+func jdecUndefined(data []byte) (interface{}, error) {
+	var v struct {
+		B bool `json:"$undefined"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if !v.B {
+		return nil, fmt.Errorf("invalid $undefined object: %s", data)
+	}
+	return Undefined, nil
+}
+
+func jencUndefined(v interface{}) ([]byte, error) {
+	return []byte(`{"$undefined":true}`), nil
+}

+ 184 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/json_test.go

@@ -0,0 +1,184 @@
+package bson_test
+
+import (
+	"gopkg.in/mgo.v2-unstable/bson"
+
+	. "gopkg.in/check.v1"
+	"reflect"
+	"strings"
+	"time"
+)
+
+type jsonTest struct {
+	a interface{} // value encoded into JSON (optional)
+	b string      // JSON expected as output of <a>, and used as input to <c>
+	c interface{} // value expected from decoding <b>, defaults to <a>
+	e string      // error string, if decoding <b> should fail
+}
+
+var jsonTests = []jsonTest{
+	// $binary
+	{
+		a: []byte("foo"),
+		b: `{"$binary":"Zm9v","$type":"0x0"}`,
+	}, {
+		a: bson.Binary{Kind: 2, Data: []byte("foo")},
+		b: `{"$binary":"Zm9v","$type":"0x2"}`,
+	}, {
+		b: `BinData(2,"Zm9v")`,
+		c: bson.Binary{Kind: 2, Data: []byte("foo")},
+	},
+
+	// $date
+	{
+		a: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC),
+		b: `{"$date":"2016-05-15T01:02:03.004Z"}`,
+	}, {
+		b: `{"$date": {"$numberLong": "1002"}}`,
+		c: time.Date(1970, 1, 1, 0, 0, 1, 2e6, time.UTC),
+	}, {
+		b: `ISODate("2016-05-15T01:02:03.004Z")`,
+		c: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC),
+	}, {
+		b: `new Date(1000)`,
+		c: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
+	}, {
+		b: `new Date("2016-05-15")`,
+		c: time.Date(2016, 5, 15, 0, 0, 0, 0, time.UTC),
+	},
+
+	// $timestamp
+	{
+		a: bson.MongoTimestamp(4294967298),
+		b: `{"$timestamp":{"t":1,"i":2}}`,
+	}, {
+		b: `Timestamp(1, 2)`,
+		c: bson.MongoTimestamp(4294967298),
+	},
+
+	// $regex
+	{
+		a: bson.RegEx{"pattern", "options"},
+		b: `{"$regex":"pattern","$options":"options"}`,
+	},
+
+	// $oid
+	{
+		a: bson.ObjectIdHex("0123456789abcdef01234567"),
+		b: `{"$oid":"0123456789abcdef01234567"}`,
+	}, {
+		b: `ObjectId("0123456789abcdef01234567")`,
+		c: bson.ObjectIdHex("0123456789abcdef01234567"),
+	},
+
+	// $ref (no special type)
+	{
+		b: `DBRef("name", "id")`,
+		c: map[string]interface{}{"$ref": "name", "$id": "id"},
+	},
+
+	// $numberLong
+	{
+		a: 123,
+		b: `123`,
+	}, {
+		a: int64(9007199254740992),
+		b: `{"$numberLong":9007199254740992}`,
+	}, {
+		a: int64(1<<53 + 1),
+		b: `{"$numberLong":"9007199254740993"}`,
+	}, {
+		a: 1<<53 + 1,
+		b: `{"$numberLong":"9007199254740993"}`,
+		c: int64(9007199254740993),
+	}, {
+		b: `NumberLong(9007199254740992)`,
+		c: int64(1 << 53),
+	}, {
+		b: `NumberLong("9007199254740993")`,
+		c: int64(1<<53 + 1),
+	},
+
+	// $minKey, $maxKey
+	{
+		a: bson.MinKey,
+		b: `{"$minKey":1}`,
+	}, {
+		a: bson.MaxKey,
+		b: `{"$maxKey":1}`,
+	}, {
+		b: `MinKey`,
+		c: bson.MinKey,
+	}, {
+		b: `MaxKey`,
+		c: bson.MaxKey,
+	}, {
+		b: `{"$minKey":0}`,
+		e: `invalid $minKey object: {"$minKey":0}`,
+	}, {
+		b: `{"$maxKey":0}`,
+		e: `invalid $maxKey object: {"$maxKey":0}`,
+	},
+
+	{
+		a: bson.Undefined,
+		b: `{"$undefined":true}`,
+	}, {
+		b: `undefined`,
+		c: bson.Undefined,
+	}, {
+		b: `{"v": undefined}`,
+		c: struct{ V interface{} }{bson.Undefined},
+	},
+
+	// Unquoted keys and trailing commas
+	{
+		b: `{$foo: ["bar",],}`,
+		c: map[string]interface{}{"$foo": []interface{}{"bar"}},
+	},
+}
+
+func (s *S) TestJSON(c *C) {
+	for i, item := range jsonTests {
+		c.Logf("------------ (#%d)", i)
+		c.Logf("A: %#v", item.a)
+		c.Logf("B: %#v", item.b)
+
+		if item.c == nil {
+			item.c = item.a
+		} else {
+			c.Logf("C: %#v", item.c)
+		}
+		if item.e != "" {
+			c.Logf("E: %s", item.e)
+		}
+
+		if item.a != nil {
+			data, err := bson.MarshalJSON(item.a)
+			c.Assert(err, IsNil)
+			c.Logf("Dumped: %#v", string(data))
+			c.Assert(strings.TrimSuffix(string(data), "\n"), Equals, item.b)
+		}
+
+		var zero interface{}
+		if item.c == nil {
+			zero = &struct{}{}
+		} else {
+			zero = reflect.New(reflect.TypeOf(item.c)).Interface()
+		}
+		err := bson.UnmarshalJSON([]byte(item.b), zero)
+		if item.e != "" {
+			c.Assert(err, NotNil)
+			c.Assert(err.Error(), Equals, item.e)
+			continue
+		}
+		c.Assert(err, IsNil)
+		zerov := reflect.ValueOf(zero)
+		value := zerov.Interface()
+		if zerov.Kind() == reflect.Ptr {
+			value = zerov.Elem().Interface()
+		}
+		c.Logf("Loaded: %#v", value)
+		c.Assert(value, DeepEquals, item.c)
+	}
+}

+ 27 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/specdata/update.sh

@@ -0,0 +1,27 @@
+#!/bin/sh
+
+set -e
+
+if [ ! -d specifications ]; then
+	git clone -b bson git@github.com:jyemin/specifications
+fi
+
+TESTFILE="../specdata_test.go"
+
+cat <<END > $TESTFILE
+package bson_test
+
+var specTests = []string{
+END
+
+for file in specifications/source/bson/tests/*.yml; do
+	(
+		echo '`'
+		cat $file
+		echo -n '`,'
+	) >> $TESTFILE
+done
+
+echo '}' >> $TESTFILE
+
+gofmt -w $TESTFILE

+ 241 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/bson/specdata_test.go

@@ -0,0 +1,241 @@
+package bson_test
+
+var specTests = []string{
+	`
+--- 
+description: "Array type"
+documents:
+  - 
+    decoded: 
+      a : []
+    encoded: 0D000000046100050000000000 
+  - 
+    decoded: 
+      a: [10]
+    encoded: 140000000461000C0000001030000A0000000000
+  -
+    # Decode an array that uses an empty string as the key
+    decodeOnly : true
+    decoded: 
+      a: [10]
+    encoded: 130000000461000B00000010000A0000000000
+  -
+    # Decode an array that uses a non-numeric string as the key
+    decodeOnly : true
+    decoded: 
+      a: [10]
+    encoded: 150000000461000D000000106162000A0000000000
+
+
+`, `
+--- 
+description: "Boolean type"
+documents: 
+  - 
+    encoded: "090000000862000100"
+    decoded: { "b" : true }
+  - 
+    encoded: "090000000862000000"
+    decoded: { "b" : false }
+    
+ 
+  `, `
+--- 
+description: "Corrupted BSON"
+documents:
+  -
+    encoded: "09000000016600"
+    error: "truncated double"
+  -
+    encoded: "09000000026600"
+    error: "truncated string"
+  -
+    encoded: "09000000036600"
+    error: "truncated document"
+  -
+    encoded: "09000000046600"
+    error: "truncated array"
+  -
+    encoded: "09000000056600"
+    error: "truncated binary"
+  -
+    encoded: "09000000076600"
+    error: "truncated objectid"
+  -
+    encoded: "09000000086600"
+    error: "truncated boolean"
+  -
+    encoded: "09000000096600"
+    error: "truncated date"
+  -
+    encoded: "090000000b6600"
+    error: "truncated regex"
+  -
+    encoded: "090000000c6600"
+    error: "truncated db pointer"
+  -
+    encoded: "0C0000000d6600"
+    error: "truncated javascript"
+  -
+    encoded: "0C0000000e6600"
+    error: "truncated symbol"
+  -
+    encoded: "0C0000000f6600"
+    error: "truncated javascript with scope"
+  -
+    encoded: "0C000000106600"
+    error: "truncated int32"
+  -
+    encoded: "0C000000116600"
+    error: "truncated timestamp"
+  -
+    encoded: "0C000000126600"
+    error: "truncated int64"
+  - 
+    encoded: "0400000000"
+    error: basic
+  - 
+    encoded: "0500000001"
+    error: basic
+  - 
+    encoded: "05000000"
+    error: basic
+  - 
+    encoded: "0700000002610078563412"
+    error: basic
+  - 
+    encoded: "090000001061000500"
+    error: basic
+  - 
+    encoded: "00000000000000000000"
+    error: basic
+  - 
+    encoded: "1300000002666f6f00040000006261720000"
+    error: "basic"
+  - 
+    encoded: "1800000003666f6f000f0000001062617200ffffff7f0000"
+    error: basic
+  - 
+    encoded: "1500000003666f6f000c0000000862617200010000"
+    error: basic
+  - 
+    encoded: "1c00000003666f6f001200000002626172000500000062617a000000"
+    error: basic
+  - 
+    encoded: "1000000002610004000000616263ff00"
+    error: string is not null-terminated
+  - 
+    encoded: "0c0000000200000000000000"
+    error: bad_string_length
+  - 
+    encoded: "120000000200ffffffff666f6f6261720000"
+    error: bad_string_length
+  - 
+    encoded: "0c0000000e00000000000000"
+    error: bad_string_length
+  - 
+    encoded: "120000000e00ffffffff666f6f6261720000"
+    error: bad_string_length
+  - 
+    encoded: "180000000c00fa5bd841d6585d9900"
+    error: ""
+  - 
+    encoded: "1e0000000c00ffffffff666f6f626172005259b56afa5bd841d6585d9900"
+    error: bad_string_length
+  - 
+    encoded: "0c0000000d00000000000000"
+    error: bad_string_length
+  - 
+    encoded: "0c0000000d00ffffffff0000"
+    error: bad_string_length
+  - 
+    encoded: "1c0000000f001500000000000000000c000000020001000000000000"
+    error: bad_string_length
+  - 
+    encoded: "1c0000000f0015000000ffffffff000c000000020001000000000000"
+    error: bad_string_length
+  - 
+    encoded: "1c0000000f001500000001000000000c000000020000000000000000"
+    error: bad_string_length
+  - 
+    encoded: "1c0000000f001500000001000000000c0000000200ffffffff000000"
+    error: bad_string_length
+  - 
+    encoded: "0E00000008616263646566676869707172737475"
+    error: "Run-on CString"
+  - 
+    encoded: "0100000000"
+    error: "An object size that's too small to even include the object size, but is correctly encoded, along with a correct EOO (and no data)"
+  - 
+    encoded: "1a0000000e74657374000c00000068656c6c6f20776f726c6400000500000000"
+    error: "One object, but with object size listed smaller than it is in the data"
+  - 
+    encoded: "05000000"
+    error: "One object, missing the EOO at the end"
+  - 
+    encoded: "0500000001"
+    error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01"
+  - 
+    encoded: "05000000ff"
+    error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff"
+  - 
+    encoded: "0500000070"
+    error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70"
+  - 
+    encoded: "07000000000000"
+    error: "Invalid BSON type low range"
+  - 
+    encoded: "07000000800000"
+    error: "Invalid BSON type high range"
+  -
+    encoded: "090000000862000200"
+    error: "Invalid boolean value of 2"
+  - 
+    encoded: "09000000086200ff00"
+    error: "Invalid boolean value of -1"
+  `, `
+--- 
+description: "Int32 type"
+documents: 
+  - 
+    decoded: 
+      i: -2147483648
+    encoded: 0C0000001069000000008000
+  - 
+    decoded: 
+      i: 2147483647
+    encoded: 0C000000106900FFFFFF7F00
+  - 
+    decoded: 
+      i: -1
+    encoded: 0C000000106900FFFFFFFF00
+  - 
+    decoded: 
+      i: 0
+    encoded: 0C0000001069000000000000
+  - 
+    decoded: 
+      i: 1
+    encoded: 0C0000001069000100000000
+
+`, `
+--- 
+description: "String type"
+documents:
+  - 
+    decoded: 
+      s : ""
+    encoded: 0D000000027300010000000000
+  - 
+    decoded: 
+      s: "a"
+    encoded: 0E00000002730002000000610000
+  - 
+    decoded: 
+      s: "This is a string"
+    encoded: 1D0000000273001100000054686973206973206120737472696E670000
+  - 
+    decoded: 
+      s: "κόσμε"
+    encoded: 180000000273000C000000CEBAE1BDB9CF83CEBCCEB50000
+`}

+ 351 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/bulk.go

@@ -0,0 +1,351 @@
+package mgo
+
+import (
+	"bytes"
+	"sort"
+
+	"gopkg.in/mgo.v2-unstable/bson"
+)
+
+// Bulk represents an operation that can be prepared with several
+// orthogonal changes before being delivered to the server.
+//
+// MongoDB servers older than version 2.6 do not have proper support for bulk
+// operations, so the driver maps its API onto the functionality those releases
+// do support as closely as possible. In particular, in those releases updates and
+// removals are sent individually, and inserts are sent in bulk but have
+// suboptimal error reporting compared to more recent versions of the server.
+// See the documentation of BulkErrorCase for details on that.
+//
+// Relevant documentation:
+//
+//   http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
+//
+type Bulk struct {
+	c       *Collection
+	opcount int
+	actions []bulkAction
+	ordered bool
+}
+
+type bulkOp int
+
+const (
+	bulkInsert bulkOp = iota + 1
+	bulkUpdate
+	bulkUpdateAll
+	bulkRemove
+)
+
+type bulkAction struct {
+	op   bulkOp
+	docs []interface{}
+	idxs []int
+}
+
+type bulkUpdateOp []interface{}
+type bulkDeleteOp []interface{}
+
+// BulkResult holds the results for a bulk operation.
+type BulkResult struct {
+	Matched  int
+	Modified int // Available only for MongoDB 2.6+
+
+	// Be conservative while we understand exactly how to report these
+	// results in a useful and convenient way, and also how to emulate
+	// them with prior servers.
+	private bool
+}
+
+// BulkError holds an error returned from running a Bulk operation.
+// Individual errors may be obtained and inspected via the Cases method.
+type BulkError struct {
+	ecases []BulkErrorCase
+}
+
+func (e *BulkError) Error() string {
+	if len(e.ecases) == 0 {
+		return "invalid BulkError instance: no errors"
+	}
+	if len(e.ecases) == 1 {
+		return e.ecases[0].Err.Error()
+	}
+	msgs := make([]string, 0, len(e.ecases))
+	seen := make(map[string]bool)
+	for _, ecase := range e.ecases {
+		msg := ecase.Err.Error()
+		if !seen[msg] {
+			seen[msg] = true
+			msgs = append(msgs, msg)
+		}
+	}
+	if len(msgs) == 1 {
+		return msgs[0]
+	}
+	var buf bytes.Buffer
+	buf.WriteString("multiple errors in bulk operation:\n")
+	for _, msg := range msgs {
+		buf.WriteString("  - ")
+		buf.WriteString(msg)
+		buf.WriteByte('\n')
+	}
+	return buf.String()
+}
+
+type bulkErrorCases []BulkErrorCase
+
+func (slice bulkErrorCases) Len() int           { return len(slice) }
+func (slice bulkErrorCases) Less(i, j int) bool { return slice[i].Index < slice[j].Index }
+func (slice bulkErrorCases) Swap(i, j int)      { slice[i], slice[j] = slice[j], slice[i] }
+
+// BulkErrorCase holds an individual error found while attempting a single change
+// within a bulk operation, and the position in which it was enqueued.
+//
+// MongoDB servers older than version 2.6 do not have proper support for bulk
+// operations, so the driver maps its API onto the functionality those releases
+// do support as closely as possible. In particular, only the last error is
+// reported for bulk inserts, without any positional information, so the Index
+// field is set to -1 in these cases.
+type BulkErrorCase struct {
+	Index int // Position of operation that failed, or -1 if unknown.
+	Err   error
+}
+
+// Cases returns all individual errors found while attempting the requested changes.
+//
+// See the documentation of BulkErrorCase for limitations in older MongoDB releases.
+func (e *BulkError) Cases() []BulkErrorCase {
+	return e.ecases
+}
+
+// Bulk returns a value to prepare the execution of a bulk operation.
+func (c *Collection) Bulk() *Bulk {
+	return &Bulk{c: c, ordered: true}
+}
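+
+// A minimal usage sketch (the session, database, and collection names
+// below are illustrative, mirroring the tests in this package):
+//
+//     bulk := session.DB("mydb").C("mycoll").Bulk()
+//     bulk.Unordered() // optional: continue past individual failures
+//     bulk.Insert(bson.M{"n": 1}, bson.M{"n": 2})
+//     result, err := bulk.Run()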
+
+// Unordered puts the bulk operation in unordered mode.
+//
+// In unordered mode the individual operations may be sent
+// out of order, which means later operations may proceed
+// even if prior ones have failed.
+func (b *Bulk) Unordered() {
+	b.ordered = false
+}
+
+func (b *Bulk) action(op bulkOp, opcount int) *bulkAction {
+	var action *bulkAction
+	if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op {
+		action = &b.actions[len(b.actions)-1]
+	} else if !b.ordered {
+		for i := range b.actions {
+			if b.actions[i].op == op {
+				action = &b.actions[i]
+				break
+			}
+		}
+	}
+	if action == nil {
+		b.actions = append(b.actions, bulkAction{op: op})
+		action = &b.actions[len(b.actions)-1]
+	}
+	for i := 0; i < opcount; i++ {
+		action.idxs = append(action.idxs, b.opcount)
+		b.opcount++
+	}
+	return action
+}
+
+// Insert queues up the provided documents for insertion.
+func (b *Bulk) Insert(docs ...interface{}) {
+	action := b.action(bulkInsert, len(docs))
+	action.docs = append(action.docs, docs...)
+}
+
+// Remove queues up the provided selectors for removing matching documents.
+// Each selector will remove only a single matching document.
+func (b *Bulk) Remove(selectors ...interface{}) {
+	action := b.action(bulkRemove, len(selectors))
+	for _, selector := range selectors {
+		if selector == nil {
+			selector = bson.D{}
+		}
+		action.docs = append(action.docs, &deleteOp{
+			Collection: b.c.FullName,
+			Selector:   selector,
+			Flags:      1,
+			Limit:      1,
+		})
+	}
+}
+
+// RemoveAll queues up the provided selectors for removing all matching documents.
+// Each selector will remove all matching documents.
+func (b *Bulk) RemoveAll(selectors ...interface{}) {
+	action := b.action(bulkRemove, len(selectors))
+	for _, selector := range selectors {
+		if selector == nil {
+			selector = bson.D{}
+		}
+		action.docs = append(action.docs, &deleteOp{
+			Collection: b.c.FullName,
+			Selector:   selector,
+			Flags:      0,
+			Limit:      0,
+		})
+	}
+}
+
+// Update queues up the provided pairs of updating instructions.
+// The first element of each pair selects which documents must be
+// updated, and the second element defines how to update them.
+// Each pair updates at most one matching document.
+func (b *Bulk) Update(pairs ...interface{}) {
+	if len(pairs)%2 != 0 {
+		panic("Bulk.Update requires an even number of parameters")
+	}
+	action := b.action(bulkUpdate, len(pairs)/2)
+	for i := 0; i < len(pairs); i += 2 {
+		selector := pairs[i]
+		if selector == nil {
+			selector = bson.D{}
+		}
+		action.docs = append(action.docs, &updateOp{
+			Collection: b.c.FullName,
+			Selector:   selector,
+			Update:     pairs[i+1],
+		})
+	}
+}
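+
+// For illustration, the arguments are laid out as (selector, change,
+// selector, change, ...); the field names here are arbitrary:
+//
+//     bulk.Update(
+//         bson.M{"_id": 1}, bson.M{"$set": bson.M{"n": 10}},
+//         bson.M{"_id": 2}, bson.M{"$inc": bson.M{"n": 1}},
+//     )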
+
+// UpdateAll queues up the provided pairs of updating instructions.
+// The first element of each pair selects which documents must be
+// updated, and the second element defines how to update them.
+// Each pair updates all documents matching the selector.
+func (b *Bulk) UpdateAll(pairs ...interface{}) {
+	if len(pairs)%2 != 0 {
+		panic("Bulk.UpdateAll requires an even number of parameters")
+	}
+	action := b.action(bulkUpdate, len(pairs)/2)
+	for i := 0; i < len(pairs); i += 2 {
+		selector := pairs[i]
+		if selector == nil {
+			selector = bson.D{}
+		}
+		action.docs = append(action.docs, &updateOp{
+			Collection: b.c.FullName,
+			Selector:   selector,
+			Update:     pairs[i+1],
+			Flags:      2,
+			Multi:      true,
+		})
+	}
+}
+
+// Upsert queues up the provided pairs of upserting instructions.
+// The first element of each pair selects which documents must be
+// updated, and the second element defines how to update them.
+// Each pair updates at most one matching document, inserting a new
+// document when the selector matches nothing.
+func (b *Bulk) Upsert(pairs ...interface{}) {
+	if len(pairs)%2 != 0 {
+		panic("Bulk.Update requires an even number of parameters")
+	}
+	action := b.action(bulkUpdate, len(pairs)/2)
+	for i := 0; i < len(pairs); i += 2 {
+		selector := pairs[i]
+		if selector == nil {
+			selector = bson.D{}
+		}
+		action.docs = append(action.docs, &updateOp{
+			Collection: b.c.FullName,
+			Selector:   selector,
+			Update:     pairs[i+1],
+			Flags:      1,
+			Upsert:     true,
+		})
+	}
+}
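+
+// A hedged sketch: when no document matches the selector, the change
+// is applied to a new document instead (field names are arbitrary):
+//
+//     bulk.Upsert(bson.M{"_id": 3}, bson.M{"$set": bson.M{"n": 30}})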
+
+// Run runs all the operations queued up.
+//
+// If an error is reported on an unordered bulk operation, the error value may
+// be an aggregation of all issues observed. As an exception to that, Insert
+// operations running on MongoDB versions prior to 2.6 will report the last
+// error only due to a limitation in the wire protocol.
+func (b *Bulk) Run() (*BulkResult, error) {
+	var result BulkResult
+	var berr BulkError
+	var failed bool
+	for i := range b.actions {
+		action := &b.actions[i]
+		var ok bool
+		switch action.op {
+		case bulkInsert:
+			ok = b.runInsert(action, &result, &berr)
+		case bulkUpdate:
+			ok = b.runUpdate(action, &result, &berr)
+		case bulkRemove:
+			ok = b.runRemove(action, &result, &berr)
+		default:
+			panic("unknown bulk operation")
+		}
+		if !ok {
+			failed = true
+			if b.ordered {
+				break
+			}
+		}
+	}
+	if failed {
+		sort.Sort(bulkErrorCases(berr.ecases))
+		return nil, &berr
+	}
+	return &result, nil
+}
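+
+// A sketch of inspecting individual failures after Run; callers outside
+// this package would refer to the error type as *mgo.BulkError:
+//
+//     if _, err := bulk.Run(); err != nil {
+//         if berr, ok := err.(*BulkError); ok {
+//             for _, ecase := range berr.Cases() {
+//                 fmt.Println(ecase.Index, ecase.Err)
+//             }
+//         }
+//     }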
+
+func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *BulkError) bool {
+	op := &insertOp{b.c.FullName, action.docs, 0}
+	if !b.ordered {
+		op.flags = 1 // ContinueOnError
+	}
+	lerr, err := b.c.writeOp(op, b.ordered)
+	return b.checkSuccess(action, berr, lerr, err)
+}
+
+func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *BulkError) bool {
+	lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered)
+	if lerr != nil {
+		result.Matched += lerr.N
+		result.Modified += lerr.modified
+	}
+	return b.checkSuccess(action, berr, lerr, err)
+}
+
+func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *BulkError) bool {
+	lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered)
+	if lerr != nil {
+		result.Matched += lerr.N
+		result.Modified += lerr.modified
+	}
+	return b.checkSuccess(action, berr, lerr, err)
+}
+
+func (b *Bulk) checkSuccess(action *bulkAction, berr *BulkError, lerr *LastError, err error) bool {
+	if lerr != nil && len(lerr.ecases) > 0 {
+		for i := 0; i < len(lerr.ecases); i++ {
+			// Map back from the local error index into the visible one.
+			ecase := lerr.ecases[i]
+			idx := ecase.Index
+			if idx >= 0 {
+				idx = action.idxs[idx]
+			}
+			berr.ecases = append(berr.ecases, BulkErrorCase{idx, ecase.Err})
+		}
+		return false
+	} else if err != nil {
+		for i := 0; i < len(action.idxs); i++ {
+			berr.ecases = append(berr.ecases, BulkErrorCase{action.idxs[i], err})
+		}
+		return false
+	}
+	return true
+}

+ 504 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/bulk_test.go

@@ -0,0 +1,504 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2015 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo_test
+
+import (
+	. "gopkg.in/check.v1"
+	"gopkg.in/mgo.v2-unstable"
+)
+
+func (s *S) TestBulkInsert(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+	bulk := coll.Bulk()
+	bulk.Insert(M{"n": 1})
+	bulk.Insert(M{"n": 2}, M{"n": 3})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
+}
+
+func (s *S) TestBulkInsertError(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+	bulk := coll.Bulk()
+	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
+	_, err = bulk.Run()
+	c.Assert(err, ErrorMatches, ".*duplicate key.*")
+	c.Assert(mgo.IsDup(err), Equals, true)
+
+	type doc struct {
+		N int `_id`
+	}
+	var res []doc
+	err = coll.Find(nil).Sort("_id").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{1}, {2}})
+}
+
+func (s *S) TestBulkInsertErrorUnordered(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+	bulk := coll.Bulk()
+	bulk.Unordered()
+	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
+	_, err = bulk.Run()
+	c.Assert(err, ErrorMatches, ".*duplicate key.*")
+
+	type doc struct {
+		N int `_id`
+	}
+	var res []doc
+	err = coll.Find(nil).Sort("_id").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
+}
+
+func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) {
+	// The server has a batch limit of 1000 documents when using write commands.
+	// This artificial limit did not exist with the old wire protocol, so to
+	// avoid compatibility issues the implementation internally splits batches
+	// to the proper size and delivers them one by one. This test ensures that
+	// the behavior of unordered (that is, continue on error) remains correct
+	// when errors happen and there are batches left.
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+	bulk := coll.Bulk()
+	bulk.Unordered()
+
+	const total = 4096
+	type doc struct {
+		Id int `_id`
+	}
+	docs := make([]interface{}, total)
+	for i := 0; i < total; i++ {
+		docs[i] = doc{i}
+	}
+	docs[1] = doc{0}
+	bulk.Insert(docs...)
+	_, err = bulk.Run()
+	c.Assert(err, ErrorMatches, ".*duplicate key.*")
+
+	n, err := coll.Count()
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, total-1)
+
+	var res doc
+	err = coll.FindId(1500).One(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res.Id, Equals, 1500)
+}
+
+func (s *S) TestBulkErrorString(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	// If it's just the same string multiple times, join it into a single message.
+	bulk := coll.Bulk()
+	bulk.Unordered()
+	bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2})
+	_, err = bulk.Run()
+	c.Assert(err, ErrorMatches, ".*duplicate key.*")
+	c.Assert(err, Not(ErrorMatches), ".*duplicate key.*duplicate key")
+	c.Assert(mgo.IsDup(err), Equals, true)
+
+	// With matching errors but different messages, present them all.
+	bulk = coll.Bulk()
+	bulk.Unordered()
+	bulk.Insert(M{"_id": "dupone"}, M{"_id": "dupone"}, M{"_id": "duptwo"}, M{"_id": "duptwo"})
+	_, err = bulk.Run()
+	if s.versionAtLeast(2, 6) {
+		c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n(  - .*duplicate.*\n){2}$")
+		c.Assert(err, ErrorMatches, "(?s).*dupone.*")
+		c.Assert(err, ErrorMatches, "(?s).*duptwo.*")
+	} else {
+		// Wire protocol query doesn't return all errors.
+		c.Assert(err, ErrorMatches, ".*duplicate.*")
+	}
+	c.Assert(mgo.IsDup(err), Equals, true)
+
+	// With mixed errors, present them all.
+	bulk = coll.Bulk()
+	bulk.Unordered()
+	bulk.Insert(M{"_id": 1}, M{"_id": []int{2}})
+	_, err = bulk.Run()
+	if s.versionAtLeast(2, 6) {
+		c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n  - .*duplicate.*\n  - .*array.*\n$")
+	} else {
+		// Wire protocol query doesn't return all errors.
+		c.Assert(err, ErrorMatches, ".*array.*")
+	}
+	c.Assert(mgo.IsDup(err), Equals, false)
+}
+
+func (s *S) TestBulkErrorCases_2_6(c *C) {
+	if !s.versionAtLeast(2, 6) {
+		c.Skip("2.4- has poor bulk reporting")
+	}
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	bulk := coll.Bulk()
+	bulk.Unordered()
+
+	// There's a limit of 1000 operations per command, so
+	// this forces the more complex indexing logic to act.
+	for i := 0; i < 1010; i++ {
+		switch i {
+		case 3, 14:
+			bulk.Insert(M{"_id": "dupone"})
+		case 5, 106:
+			bulk.Update(M{"_id": i - 1}, M{"$set": M{"_id": 4}})
+		case 7, 1008:
+			bulk.Insert(M{"_id": "duptwo"})
+		default:
+			bulk.Insert(M{"_id": i})
+		}
+	}
+
+	_, err = bulk.Run()
+	ecases := err.(*mgo.BulkError).Cases()
+
+	c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*")
+	c.Check(ecases[0].Index, Equals, 14)
+	c.Check(ecases[1].Err, ErrorMatches, ".*update.*_id.*")
+	c.Check(ecases[1].Index, Equals, 106)
+	c.Check(ecases[2].Err, ErrorMatches, ".*duplicate.*duptwo.*")
+	c.Check(ecases[2].Index, Equals, 1008)
+}
+
+func (s *S) TestBulkErrorCases_2_4(c *C) {
+	if s.versionAtLeast(2, 6) {
+		c.Skip("2.6+ has better reporting")
+	}
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	bulk := coll.Bulk()
+	bulk.Unordered()
+
+	// There's a limit of 1000 operations per command, so
+	// this forces the more complex indexing logic to act.
+	for i := 0; i < 1010; i++ {
+		switch i {
+		case 3, 14:
+			bulk.Insert(M{"_id": "dupone"})
+		case 5:
+			bulk.Update(M{"_id": i - 1}, M{"$set": M{"n": 4}})
+		case 106:
+			bulk.Update(M{"_id": i - 1}, M{"$bogus": M{"n": 4}})
+		case 7, 1008:
+			bulk.Insert(M{"_id": "duptwo"})
+		default:
+			bulk.Insert(M{"_id": i})
+		}
+	}
+
+	_, err = bulk.Run()
+	ecases := err.(*mgo.BulkError).Cases()
+
+	c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*duptwo.*")
+	c.Check(ecases[0].Index, Equals, -1)
+	c.Check(ecases[1].Err, ErrorMatches, `.*\$bogus.*`)
+	c.Check(ecases[1].Index, Equals, 106)
+}
+
+func (s *S) TestBulkErrorCasesOrdered(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	bulk := coll.Bulk()
+
+	// Unlike the unordered tests above, this bulk is ordered, so the
+	// first reported error should stop processing of the batch.
+	for i := 0; i < 20; i++ {
+		switch i {
+		case 3, 14:
+			bulk.Insert(M{"_id": "dupone"})
+		case 7, 17:
+			bulk.Insert(M{"_id": "duptwo"})
+		default:
+			bulk.Insert(M{"_id": i})
+		}
+	}
+
+	_, err = bulk.Run()
+	ecases := err.(*mgo.BulkError).Cases()
+
+	c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*")
+	if s.versionAtLeast(2, 6) {
+		c.Check(ecases[0].Index, Equals, 14)
+	} else {
+		c.Check(ecases[0].Index, Equals, -1)
+	}
+	c.Check(ecases, HasLen, 1)
+}
+
+func (s *S) TestBulkUpdate(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.Update(M{"n": 1}, M{"$set": M{"n": 1}})
+	bulk.Update(M{"n": 2}, M{"$set": M{"n": 20}})
+	bulk.Update(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
+	bulk.Update(M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 3}, M{"$set": M{"n": 30}})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r.Matched, Equals, 4)
+	if s.versionAtLeast(2, 6) {
+		c.Assert(r.Modified, Equals, 3)
+	}
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{10}, {20}, {30}})
+}
+
+func (s *S) TestBulkUpdateError(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.Update(
+		M{"n": 1}, M{"$set": M{"n": 10}},
+		M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
+		M{"n": 3}, M{"$set": M{"n": 30}},
+	)
+	r, err := bulk.Run()
+	c.Assert(err, ErrorMatches, ".*_id.*")
+	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{2}, {3}, {10}})
+}
+
+func (s *S) TestBulkUpdateErrorUnordered(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.Unordered()
+	bulk.Update(
+		M{"n": 1}, M{"$set": M{"n": 10}},
+		M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
+		M{"n": 3}, M{"$set": M{"n": 30}},
+	)
+	r, err := bulk.Run()
+	c.Assert(err, ErrorMatches, ".*_id.*")
+	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{2}, {10}, {30}})
+}
+
+func (s *S) TestBulkUpdateAll(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.UpdateAll(M{"n": 1}, M{"$set": M{"n": 10}})
+	bulk.UpdateAll(M{"n": 2}, M{"$set": M{"n": 2}})  // Won't change.
+	bulk.UpdateAll(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
+	bulk.UpdateAll(M{}, M{"$inc": M{"n": 1}}, M{"n": 11}, M{"$set": M{"n": 5}})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r.Matched, Equals, 6)
+	if s.versionAtLeast(2, 6) {
+		c.Assert(r.Modified, Equals, 5)
+	}
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{3}, {4}, {5}})
+}
+
+func (s *S) TestBulkMixedUnordered(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	// Abuse undefined behavior to ensure the desired implementation is in place.
+	bulk := coll.Bulk()
+	bulk.Unordered()
+	bulk.Insert(M{"n": 1})
+	bulk.Update(M{"n": 2}, M{"$inc": M{"n": 1}})
+	bulk.Insert(M{"n": 2})
+	bulk.Update(M{"n": 3}, M{"$inc": M{"n": 1}})
+	bulk.Update(M{"n": 1}, M{"$inc": M{"n": 1}})
+	bulk.Insert(M{"n": 3})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r.Matched, Equals, 3)
+	if s.versionAtLeast(2, 6) {
+		c.Assert(r.Modified, Equals, 3)
+	}
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{2}, {3}, {4}})
+}
+
+func (s *S) TestBulkUpsert(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.Upsert(M{"n": 2}, M{"$set": M{"n": 20}})
+	bulk.Upsert(M{"n": 4}, M{"$set": M{"n": 40}}, M{"n": 3}, M{"$set": M{"n": 30}})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{1}, {20}, {30}, {40}})
+}
+
+func (s *S) TestBulkRemove(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.Remove(M{"n": 1})
+	bulk.Remove(M{"n": 2}, M{"n": 4})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r.Matched, Equals, 3)
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{3}, {4}})
+}
+
+func (s *S) TestBulkRemoveAll(c *C) {
+	session, err := mgo.Dial("localhost:40001")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4})
+	c.Assert(err, IsNil)
+
+	bulk := coll.Bulk()
+	bulk.RemoveAll(M{"n": 1})
+	bulk.RemoveAll(M{"n": 2}, M{"n": 4})
+	r, err := bulk.Run()
+	c.Assert(err, IsNil)
+	c.Assert(r.Matched, Equals, 4)
+
+	type doc struct{ N int }
+	var res []doc
+	err = coll.Find(nil).Sort("n").All(&res)
+	c.Assert(err, IsNil)
+	c.Assert(res, DeepEquals, []doc{{3}})
+}
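
The tests above exercise the whole Bulk surface. For quick reference, a minimal standalone sketch of the same API follows; the server address, database, and collection names are illustrative assumptions rather than values taken from this suite.

    package main

    import (
    	"fmt"
    	"log"

    	"gopkg.in/mgo.v2-unstable"
    	"gopkg.in/mgo.v2-unstable/bson"
    )

    func main() {
    	session, err := mgo.Dial("localhost:27017") // assumed address
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer session.Close()

    	bulk := session.DB("mydb").C("mycoll").Bulk()
    	bulk.Unordered() // keep going past individual failures
    	bulk.Insert(bson.M{"n": 1}, bson.M{"n": 2})
    	bulk.Update(bson.M{"n": 1}, bson.M{"$set": bson.M{"n": 10}})

    	result, err := bulk.Run()
    	if berr, ok := err.(*mgo.BulkError); ok {
    		// Each failed operation reports its index within the bulk,
    		// or -1 when the server did not say which one failed.
    		for _, c := range berr.Cases() {
    			fmt.Println("op", c.Index, "failed:", c.Err)
    		}
    	} else if err != nil {
    		log.Fatal(err)
    	}
    	if result != nil {
    		fmt.Println("matched:", result.Matched, "modified:", result.Modified)
    	}
    }

Note that Run may hand back a usable *BulkResult even when it also returns a *BulkError, which is what the FitsTypeOf assertions in the tests above rely on.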

+ 682 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/cluster.go

@@ -0,0 +1,682 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"gopkg.in/mgo.v2-unstable/bson"
+)
+
+// ---------------------------------------------------------------------------
+// Mongo cluster encapsulation.
+//
+// A cluster enables the communication with one or more servers participating
+// in a mongo cluster.  This works with individual servers, a replica set,
+// a replica pair, one or multiple mongos routers, etc.
+
+type mongoCluster struct {
+	sync.RWMutex
+	serverSynced sync.Cond
+	userSeeds    []string
+	dynaSeeds    []string
+	servers      mongoServers
+	masters      mongoServers
+	references   int
+	syncing      bool
+	direct       bool
+	failFast     bool
+	syncCount    uint
+	setName      string
+	cachedIndex  map[string]bool
+	sync         chan bool
+	dial         dialer
+}
+
+func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster {
+	cluster := &mongoCluster{
+		userSeeds:  userSeeds,
+		references: 1,
+		direct:     direct,
+		failFast:   failFast,
+		dial:       dial,
+		setName:    setName,
+	}
+	cluster.serverSynced.L = cluster.RWMutex.RLocker()
+	cluster.sync = make(chan bool, 1)
+	stats.cluster(+1)
+	go cluster.syncServersLoop()
+	return cluster
+}
+
+// Acquire increases the reference count for the cluster.
+func (cluster *mongoCluster) Acquire() {
+	cluster.Lock()
+	cluster.references++
+	debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references)
+	cluster.Unlock()
+}
+
+// Release decreases the reference count for the cluster. Once
+// it reaches zero, all servers will be closed.
+func (cluster *mongoCluster) Release() {
+	cluster.Lock()
+	if cluster.references == 0 {
+		panic("cluster.Release() with references == 0")
+	}
+	cluster.references--
+	debugf("Cluster %p released (refs=%d)", cluster, cluster.references)
+	if cluster.references == 0 {
+		for _, server := range cluster.servers.Slice() {
+			server.Close()
+		}
+		// Wake up the sync loop so it can die.
+		cluster.syncServers()
+		stats.cluster(-1)
+	}
+	cluster.Unlock()
+}
+
+func (cluster *mongoCluster) LiveServers() (servers []string) {
+	cluster.RLock()
+	for _, serv := range cluster.servers.Slice() {
+		servers = append(servers, serv.Addr)
+	}
+	cluster.RUnlock()
+	return servers
+}
+
+func (cluster *mongoCluster) removeServer(server *mongoServer) {
+	cluster.Lock()
+	cluster.masters.Remove(server)
+	other := cluster.servers.Remove(server)
+	cluster.Unlock()
+	if other != nil {
+		other.Close()
+		log("Removed server ", server.Addr, " from cluster.")
+	}
+	server.Close()
+}
+
+type isMasterResult struct {
+	IsMaster       bool
+	Secondary      bool
+	Primary        string
+	Hosts          []string
+	Passives       []string
+	Tags           bson.D
+	Msg            string
+	SetName        string `bson:"setName"`
+	MaxWireVersion int    `bson:"maxWireVersion"`
+}
+
+func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error {
+	// Monotonic lets it talk to a slave and still hold the socket.
+	session := newSession(Monotonic, cluster, 10*time.Second)
+	session.setSocket(socket)
+	err := session.Run("ismaster", result)
+	session.Close()
+	return err
+}
+
+type possibleTimeout interface {
+	Timeout() bool
+}
+
+var syncSocketTimeout = 5 * time.Second
+
+func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) {
+	var syncTimeout time.Duration
+	if raceDetector {
+		// This variable is only ever touched by tests.
+		globalMutex.Lock()
+		syncTimeout = syncSocketTimeout
+		globalMutex.Unlock()
+	} else {
+		syncTimeout = syncSocketTimeout
+	}
+
+	addr := server.Addr
+	log("SYNC Processing ", addr, "...")
+
+	// Retry a few times to avoid knocking a server down for a hiccup.
+	var result isMasterResult
+	var tryerr error
+	for retry := 0; ; retry++ {
+		if retry == 3 || retry == 1 && cluster.failFast {
+			return nil, nil, tryerr
+		}
+		if retry > 0 {
+			// Don't abuse the server needlessly if there's something actually wrong.
+			if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() {
+				// Give a chance for waiters to timeout as well.
+				cluster.serverSynced.Broadcast()
+			}
+			time.Sleep(syncShortDelay)
+		}
+
+		// It's not clear what would be a good timeout here. Is it
+		// better to wait longer or to retry?
+		socket, _, err := server.AcquireSocket(0, syncTimeout)
+		if err != nil {
+			tryerr = err
+			logf("SYNC Failed to get socket to %s: %v", addr, err)
+			continue
+		}
+		err = cluster.isMaster(socket, &result)
+		socket.Release()
+		if err != nil {
+			tryerr = err
+			logf("SYNC Command 'ismaster' to %s failed: %v", addr, err)
+			continue
+		}
+		debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result)
+		break
+	}
+
+	if cluster.setName != "" && result.SetName != cluster.setName {
+		logf("SYNC Server %s is not a member of replica set %q", addr, cluster.setName)
+		return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.setName)
+	}
+
+	if result.IsMaster {
+		debugf("SYNC %s is a master.", addr)
+		if !server.info.Master {
+			// Made an incorrect assumption above, so fix stats.
+			stats.conn(-1, false)
+			stats.conn(+1, true)
+		}
+	} else if result.Secondary {
+		debugf("SYNC %s is a slave.", addr)
+	} else if cluster.direct {
+		logf("SYNC %s is in an unknown state. Pretending it's a slave due to the direct connection.", addr)
+	} else {
+		logf("SYNC %s is neither a master nor a slave.", addr)
+		// Let stats track it as whatever was known before.
+		return nil, nil, errors.New(addr + " is neither a master nor a slave")
+	}
+
+	info = &mongoServerInfo{
+		Master:         result.IsMaster,
+		Mongos:         result.Msg == "isdbgrid",
+		Tags:           result.Tags,
+		SetName:        result.SetName,
+		MaxWireVersion: result.MaxWireVersion,
+	}
+
+	hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives))
+	if result.Primary != "" {
+		// First in the list to speed up master discovery.
+		hosts = append(hosts, result.Primary)
+	}
+	hosts = append(hosts, result.Hosts...)
+	hosts = append(hosts, result.Passives...)
+
+	debugf("SYNC %s knows about the following peers: %#v", addr, hosts)
+	return info, hosts, nil
+}
+
+type syncKind bool
+
+const (
+	completeSync syncKind = true
+	partialSync  syncKind = false
+)
+
+func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) {
+	cluster.Lock()
+	current := cluster.servers.Search(server.ResolvedAddr)
+	if current == nil {
+		if syncKind == partialSync {
+			cluster.Unlock()
+			server.Close()
+			log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.")
+			return
+		}
+		cluster.servers.Add(server)
+		if info.Master {
+			cluster.masters.Add(server)
+			log("SYNC Adding ", server.Addr, " to cluster as a master.")
+		} else {
+			log("SYNC Adding ", server.Addr, " to cluster as a slave.")
+		}
+	} else {
+		if server != current {
+			panic("addServer attempting to add duplicated server")
+		}
+		if server.Info().Master != info.Master {
+			if info.Master {
+				log("SYNC Server ", server.Addr, " is now a master.")
+				cluster.masters.Add(server)
+			} else {
+				log("SYNC Server ", server.Addr, " is now a slave.")
+				cluster.masters.Remove(server)
+			}
+		}
+	}
+	server.SetInfo(info)
+	debugf("SYNC Broadcasting availability of server %s", server.Addr)
+	cluster.serverSynced.Broadcast()
+	cluster.Unlock()
+}
+
+func (cluster *mongoCluster) getKnownAddrs() []string {
+	cluster.RLock()
+	max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len()
+	seen := make(map[string]bool, max)
+	known := make([]string, 0, max)
+
+	add := func(addr string) {
+		if _, found := seen[addr]; !found {
+			seen[addr] = true
+			known = append(known, addr)
+		}
+	}
+
+	for _, addr := range cluster.userSeeds {
+		add(addr)
+	}
+	for _, addr := range cluster.dynaSeeds {
+		add(addr)
+	}
+	for _, serv := range cluster.servers.Slice() {
+		add(serv.Addr)
+	}
+	cluster.RUnlock()
+
+	return known
+}
+
+// syncServers injects a value into the cluster.sync channel to force
+// an iteration of the syncServersLoop function.
+func (cluster *mongoCluster) syncServers() {
+	select {
+	case cluster.sync <- true:
+	default:
+	}
+}
+
+// How long to wait for a checkup of the cluster topology if nothing
+// else kicks a synchronization before that.
+const syncServersDelay = 30 * time.Second
+const syncShortDelay = 500 * time.Millisecond
+
+// syncServersLoop loops while the cluster is alive to keep its idea of
+// the server topology up-to-date. It must be called just once from
+// newCluster.  The loop iterates once syncServersDelay has passed, or
+// if somebody injects a value into the cluster.sync channel to force a
+// synchronization.  A loop iteration will contact all servers in
+// parallel, ask them about known peers and their own role within the
+// cluster, and then attempt to do the same with all the peers
+// retrieved.
+func (cluster *mongoCluster) syncServersLoop() {
+	for {
+		debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster)
+
+		cluster.Lock()
+		if cluster.references == 0 {
+			cluster.Unlock()
+			break
+		}
+		cluster.references++ // Keep alive while syncing.
+		direct := cluster.direct
+		cluster.Unlock()
+
+		cluster.syncServersIteration(direct)
+
+		// We just synchronized, so consume any outstanding requests.
+		select {
+		case <-cluster.sync:
+		default:
+		}
+
+		cluster.Release()
+
+		// Hold off before allowing another sync. No point in
+		// burning CPU looking for down servers.
+		if !cluster.failFast {
+			time.Sleep(syncShortDelay)
+		}
+
+		cluster.Lock()
+		if cluster.references == 0 {
+			cluster.Unlock()
+			break
+		}
+		cluster.syncCount++
+		// Poke all waiters so they have a chance to timeout or
+		// restart syncing if they wish to.
+		cluster.serverSynced.Broadcast()
+		// Check if we have to restart immediately either way.
+		restart := !direct && cluster.masters.Empty() || cluster.servers.Empty()
+		cluster.Unlock()
+
+		if restart {
+			log("SYNC No masters found. Will synchronize again.")
+			time.Sleep(syncShortDelay)
+			continue
+		}
+
+		debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster)
+
+		// Hold off until somebody explicitly requests a synchronization
+		// or it's time to check for a cluster topology change again.
+		select {
+		case <-cluster.sync:
+		case <-time.After(syncServersDelay):
+		}
+	}
+	debugf("SYNC Cluster %p is stopping its sync loop.", cluster)
+}
+
+func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer {
+	cluster.RLock()
+	server := cluster.servers.Search(tcpaddr.String())
+	cluster.RUnlock()
+	if server != nil {
+		return server
+	}
+	return newServer(addr, tcpaddr, cluster.sync, cluster.dial)
+}
+
+func resolveAddr(addr string) (*net.TCPAddr, error) {
+	// Simple cases that do not need actual resolution. Works with IPv4 and v6.
+	if host, port, err := net.SplitHostPort(addr); err == nil {
+		if port, _ := strconv.Atoi(port); port > 0 {
+			zone := ""
+			if i := strings.LastIndex(host, "%"); i >= 0 {
+				zone = host[i+1:]
+				host = host[:i]
+			}
+			ip := net.ParseIP(host)
+			if ip != nil {
+				return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil
+			}
+		}
+	}
+
+	// Attempt to resolve IPv4 and v6 concurrently.
+	addrChan := make(chan *net.TCPAddr, 2)
+	for _, network := range []string{"udp4", "udp6"} {
+		network := network
+		go func() {
+			// The unfortunate UDP dialing hack allows having a timeout on address resolution.
+			conn, err := net.DialTimeout(network, addr, 10*time.Second)
+			if err != nil {
+				addrChan <- nil
+			} else {
+				addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr))
+				conn.Close()
+			}
+		}()
+	}
+
+	// Wait for the result of IPv4 and v6 resolution. Use IPv4 if available.
+	tcpaddr := <-addrChan
+	if tcpaddr == nil || len(tcpaddr.IP) != 4 {
+		var timeout <-chan time.Time
+		if tcpaddr != nil {
+			// Don't wait too long if an IPv6 address is known.
+			timeout = time.After(50 * time.Millisecond)
+		}
+		select {
+		case <-timeout:
+		case tcpaddr2 := <-addrChan:
+			if tcpaddr == nil || tcpaddr2 != nil {
+				// It's an IPv4 address or the only known address. Use it.
+				tcpaddr = tcpaddr2
+			}
+		}
+	}
+
+	if tcpaddr == nil {
+		log("SYNC Failed to resolve server address: ", addr)
+		return nil, errors.New("failed to resolve server address: " + addr)
+	}
+	if tcpaddr.String() != addr {
+		debug("SYNC Address ", addr, " resolved as ", tcpaddr.String())
+	}
+	return tcpaddr, nil
+}
+
+type pendingAdd struct {
+	server *mongoServer
+	info   *mongoServerInfo
+}
+
+func (cluster *mongoCluster) syncServersIteration(direct bool) {
+	log("SYNC Starting full topology synchronization...")
+
+	var wg sync.WaitGroup
+	var m sync.Mutex
+	notYetAdded := make(map[string]pendingAdd)
+	addIfFound := make(map[string]bool)
+	seen := make(map[string]bool)
+	syncKind := partialSync
+
+	var spawnSync func(addr string, byMaster bool)
+	spawnSync = func(addr string, byMaster bool) {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+
+			tcpaddr, err := resolveAddr(addr)
+			if err != nil {
+				log("SYNC Failed to start sync of ", addr, ": ", err.Error())
+				return
+			}
+			resolvedAddr := tcpaddr.String()
+
+			m.Lock()
+			if byMaster {
+				if pending, ok := notYetAdded[resolvedAddr]; ok {
+					delete(notYetAdded, resolvedAddr)
+					m.Unlock()
+					cluster.addServer(pending.server, pending.info, completeSync)
+					return
+				}
+				addIfFound[resolvedAddr] = true
+			}
+			if seen[resolvedAddr] {
+				m.Unlock()
+				return
+			}
+			seen[resolvedAddr] = true
+			m.Unlock()
+
+			server := cluster.server(addr, tcpaddr)
+			info, hosts, err := cluster.syncServer(server)
+			if err != nil {
+				cluster.removeServer(server)
+				return
+			}
+
+			m.Lock()
+			add := direct || info.Master || addIfFound[resolvedAddr]
+			if add {
+				syncKind = completeSync
+			} else {
+				notYetAdded[resolvedAddr] = pendingAdd{server, info}
+			}
+			m.Unlock()
+			if add {
+				cluster.addServer(server, info, completeSync)
+			}
+			if !direct {
+				for _, addr := range hosts {
+					spawnSync(addr, info.Master)
+				}
+			}
+		}()
+	}
+
+	knownAddrs := cluster.getKnownAddrs()
+	for _, addr := range knownAddrs {
+		spawnSync(addr, false)
+	}
+	wg.Wait()
+
+	if syncKind == completeSync {
+		logf("SYNC Synchronization was complete (got data from primary).")
+		for _, pending := range notYetAdded {
+			cluster.removeServer(pending.server)
+		}
+	} else {
+		logf("SYNC Synchronization was partial (cannot talk to primary).")
+		for _, pending := range notYetAdded {
+			cluster.addServer(pending.server, pending.info, partialSync)
+		}
+	}
+
+	cluster.Lock()
+	mastersLen := cluster.masters.Len()
+	logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen)
+
+	// Update dynamic seeds, but only if we have any good servers. Otherwise,
+	// leave them alone for better chances of a successful sync in the future.
+	if syncKind == completeSync {
+		dynaSeeds := make([]string, cluster.servers.Len())
+		for i, server := range cluster.servers.Slice() {
+			dynaSeeds[i] = server.Addr
+		}
+		cluster.dynaSeeds = dynaSeeds
+		debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds)
+	}
+	cluster.Unlock()
+}
+
+// AcquireSocket returns a socket to a server in the cluster.  If slaveOk is
+// true, it will attempt to return a socket to a slave server.  If it is
+// false, the socket will necessarily be to a master server.
+func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) {
+	var started time.Time
+	var syncCount uint
+	warnedLimit := false
+	for {
+		cluster.RLock()
+		for {
+			mastersLen := cluster.masters.Len()
+			slavesLen := cluster.servers.Len() - mastersLen
+			debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen)
+			if mastersLen > 0 && !(slaveOk && mode == Secondary) || slavesLen > 0 && slaveOk {
+				break
+			}
+			if mastersLen > 0 && mode == Secondary && cluster.masters.HasMongos() {
+				break
+			}
+			if started.IsZero() {
+				// Initialize after fast path above.
+				started = time.Now()
+				syncCount = cluster.syncCount
+			} else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount {
+				cluster.RUnlock()
+				return nil, errors.New("no reachable servers")
+			}
+			log("Waiting for servers to synchronize...")
+			cluster.syncServers()
+
+			// Remember: this will release and reacquire the lock.
+			cluster.serverSynced.Wait()
+		}
+
+		var server *mongoServer
+		if slaveOk {
+			server = cluster.servers.BestFit(mode, serverTags)
+		} else {
+			server = cluster.masters.BestFit(mode, nil)
+		}
+		cluster.RUnlock()
+
+		if server == nil {
+			// Must have failed the requested tags. Sleep to avoid spinning.
+			time.Sleep(100 * time.Millisecond)
+			continue
+		}
+
+		s, abended, err := server.AcquireSocket(poolLimit, socketTimeout)
+		if err == errPoolLimit {
+			if !warnedLimit {
+				warnedLimit = true
+				log("WARNING: Per-server connection limit reached.")
+			}
+			time.Sleep(100 * time.Millisecond)
+			continue
+		}
+		if err != nil {
+			cluster.removeServer(server)
+			cluster.syncServers()
+			continue
+		}
+		if abended && !slaveOk {
+			var result isMasterResult
+			err := cluster.isMaster(s, &result)
+			if err != nil || !result.IsMaster {
+				logf("Cannot confirm server %s as master (%v)", server.Addr, err)
+				s.Release()
+				cluster.syncServers()
+				time.Sleep(100 * time.Millisecond)
+				continue
+			}
+		}
+		return s, nil
+	}
+	panic("unreached")
+}
+
+func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) {
+	cluster.Lock()
+	if cluster.cachedIndex == nil {
+		cluster.cachedIndex = make(map[string]bool)
+	}
+	if exists {
+		cluster.cachedIndex[cacheKey] = true
+	} else {
+		delete(cluster.cachedIndex, cacheKey)
+	}
+	cluster.Unlock()
+}
+
+func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) {
+	cluster.RLock()
+	if cluster.cachedIndex != nil {
+		result = cluster.cachedIndex[cacheKey]
+	}
+	cluster.RUnlock()
+	return
+}
+
+func (cluster *mongoCluster) ResetIndexCache() {
+	cluster.Lock()
+	cluster.cachedIndex = make(map[string]bool)
+	cluster.Unlock()
+}

The diff is not shown because this file is too large.
+ 2090 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/cluster_test.go


+ 196 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/dbtest/dbserver.go

@@ -0,0 +1,196 @@
+package dbtest
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"os"
+	"os/exec"
+	"strconv"
+	"time"
+
+	"gopkg.in/mgo.v2-unstable"
+	"gopkg.in/tomb.v2"
+)
+
+// DBServer controls a MongoDB server process to be used within test suites.
+//
+// The test server is started when Session is called the first time and should
+// remain running for the duration of all tests, with the Wipe method being
+// called between tests (before each of them) to clear stored data. After all tests
+// are done, the Stop method should be called to stop the test server.
+//
+// Before the DBServer is used the SetPath method must be called to define
+// the location for the database files to be stored.
+type DBServer struct {
+	session *mgo.Session
+	output  bytes.Buffer
+	server  *exec.Cmd
+	dbpath  string
+	host    string
+	tomb    tomb.Tomb
+}
+
+// SetPath defines the path to the directory where the database files will be
+// stored if it is started. The directory path itself is not created or removed
+// by the test helper.
+func (dbs *DBServer) SetPath(dbpath string) {
+	dbs.dbpath = dbpath
+}
+
+func (dbs *DBServer) start() {
+	if dbs.server != nil {
+		panic("DBServer already started")
+	}
+	if dbs.dbpath == "" {
+		panic("DBServer.SetPath must be called before using the server")
+	}
+	mgo.SetStats(true)
+	l, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		panic("unable to listen on a local address: " + err.Error())
+	}
+	addr := l.Addr().(*net.TCPAddr)
+	l.Close()
+	dbs.host = addr.String()
+
+	args := []string{
+		"--dbpath", dbs.dbpath,
+		"--bind_ip", "127.0.0.1",
+		"--port", strconv.Itoa(addr.Port),
+		"--nssize", "1",
+		"--noprealloc",
+		"--smallfiles",
+		"--nojournal",
+	}
+	dbs.tomb = tomb.Tomb{}
+	dbs.server = exec.Command("mongod", args...)
+	dbs.server.Stdout = &dbs.output
+	dbs.server.Stderr = &dbs.output
+	err = dbs.server.Start()
+	if err != nil {
+		panic(err)
+	}
+	dbs.tomb.Go(dbs.monitor)
+	dbs.Wipe()
+}
+
+func (dbs *DBServer) monitor() error {
+	dbs.server.Process.Wait()
+	if dbs.tomb.Alive() {
+		// Present some debugging information.
+		fmt.Fprintf(os.Stderr, "---- mongod process died unexpectedly:\n")
+		fmt.Fprintf(os.Stderr, "%s", dbs.output.Bytes())
+		fmt.Fprintf(os.Stderr, "---- mongod processes running right now:\n")
+		cmd := exec.Command("/bin/sh", "-c", "ps auxw | grep mongod")
+		cmd.Stdout = os.Stderr
+		cmd.Stderr = os.Stderr
+		cmd.Run()
+		fmt.Fprintf(os.Stderr, "----------------------------------------\n")
+
+		panic("mongod process died unexpectedly")
+	}
+	return nil
+}
+
+// Stop stops the test server process, if it is running.
+//
+// It's okay to call Stop multiple times. After the test server is
+// stopped it cannot be restarted.
+//
+// All database sessions must be closed before or while the Stop method
+// is running. Otherwise Stop will panic after a timeout informing that
+// there is a session leak.
+func (dbs *DBServer) Stop() {
+	if dbs.session != nil {
+		dbs.checkSessions()
+		if dbs.session != nil {
+			dbs.session.Close()
+			dbs.session = nil
+		}
+	}
+	if dbs.server != nil {
+		dbs.tomb.Kill(nil)
+		dbs.server.Process.Signal(os.Interrupt)
+		select {
+		case <-dbs.tomb.Dead():
+		case <-time.After(5 * time.Second):
+			panic("timeout waiting for mongod process to die")
+		}
+		dbs.server = nil
+	}
+}
+
+// Session returns a new session to the server. The returned session
+// must be closed after the test is done with it.
+//
+// The first Session obtained from a DBServer will start it.
+func (dbs *DBServer) Session() *mgo.Session {
+	if dbs.server == nil {
+		dbs.start()
+	}
+	if dbs.session == nil {
+		mgo.ResetStats()
+		var err error
+		dbs.session, err = mgo.Dial(dbs.host + "/test")
+		if err != nil {
+			panic(err)
+		}
+	}
+	return dbs.session.Copy()
+}
+
+// checkSessions ensures all mgo sessions opened were properly closed.
+// For slightly faster tests, it may be disabled by setting the
+// environment variable CHECK_SESSIONS to 0.
+func (dbs *DBServer) checkSessions() {
+	if check := os.Getenv("CHECK_SESSIONS"); check == "0" || dbs.server == nil || dbs.session == nil {
+		return
+	}
+	dbs.session.Close()
+	dbs.session = nil
+	for i := 0; i < 100; i++ {
+		stats := mgo.GetStats()
+		if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
+			return
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+	panic("There are mgo sessions still alive.")
+}
+
+// Wipe drops all created databases and their data.
+//
+// The MongoDB server remains running if it was previously running,
+// or stopped if it was previously stopped.
+//
+// All database sessions must be closed before or while the Wipe method
+// is running. Otherwise Wipe will panic after a timeout informing that
+// there is a session leak.
+func (dbs *DBServer) Wipe() {
+	if dbs.server == nil || dbs.session == nil {
+		return
+	}
+	dbs.checkSessions()
+	sessionUnset := dbs.session == nil
+	session := dbs.Session()
+	defer session.Close()
+	if sessionUnset {
+		dbs.session.Close()
+		dbs.session = nil
+	}
+	names, err := session.DatabaseNames()
+	if err != nil {
+		panic(err)
+	}
+	for _, name := range names {
+		switch name {
+		case "admin", "local", "config":
+		default:
+			err = session.DB(name).DropDatabase()
+			if err != nil {
+				panic(err)
+			}
+		}
+	}
+}
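
The doc comments above prescribe a SetPath/Session/Wipe/Stop lifecycle. A minimal sketch of wiring DBServer into a test binary might look as follows; the TestMain shape and the temporary-directory handling are assumptions for illustration, not part of this package:

    package mypkg_test

    import (
    	"io/ioutil"
    	"os"
    	"testing"

    	"gopkg.in/mgo.v2-unstable/dbtest"
    )

    var server dbtest.DBServer

    func TestMain(m *testing.M) {
    	// SetPath must be called before the first Session; the directory
    	// is created and removed by the caller, not by DBServer.
    	dir, err := ioutil.TempDir("", "dbtest")
    	if err != nil {
    		panic(err)
    	}
    	server.SetPath(dir)

    	code := m.Run() // tests call server.Session(), Close it, then server.Wipe()

    	server.Stop() // all sessions must be closed by now
    	os.RemoveAll(dir)
    	os.Exit(code)
    }
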

+ 108 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/dbtest/dbserver_test.go

@@ -0,0 +1,108 @@
+package dbtest_test
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	. "gopkg.in/check.v1"
+
+	"gopkg.in/mgo.v2-unstable"
+	"gopkg.in/mgo.v2-unstable/dbtest"
+)
+
+type M map[string]interface{}
+
+func TestAll(t *testing.T) {
+	TestingT(t)
+}
+
+type S struct {
+	oldCheckSessions string
+}
+
+var _ = Suite(&S{})
+
+func (s *S) SetUpTest(c *C) {
+	s.oldCheckSessions = os.Getenv("CHECK_SESSIONS")
+	os.Setenv("CHECK_SESSIONS", "")
+}
+
+func (s *S) TearDownTest(c *C) {
+	os.Setenv("CHECK_SESSIONS", s.oldCheckSessions)
+}
+
+func (s *S) TestWipeData(c *C) {
+	var server dbtest.DBServer
+	server.SetPath(c.MkDir())
+	defer server.Stop()
+
+	session := server.Session()
+	err := session.DB("mydb").C("mycoll").Insert(M{"a": 1})
+	session.Close()
+	c.Assert(err, IsNil)
+
+	server.Wipe()
+
+	session = server.Session()
+	names, err := session.DatabaseNames()
+	session.Close()
+	c.Assert(err, IsNil)
+	for _, name := range names {
+		if name != "local" && name != "admin" {
+			c.Fatalf("Wipe should have removed this database: %s", name)
+		}
+	}
+}
+
+func (s *S) TestStop(c *C) {
+	var server dbtest.DBServer
+	server.SetPath(c.MkDir())
+	defer server.Stop()
+
+	// Server should not be running.
+	process := server.ProcessTest()
+	c.Assert(process, IsNil)
+
+	session := server.Session()
+	addr := session.LiveServers()[0]
+	session.Close()
+
+	// Server should be running now.
+	process = server.ProcessTest()
+	p, err := os.FindProcess(process.Pid)
+	c.Assert(err, IsNil)
+	p.Release()
+
+	server.Stop()
+
+	// Server should not be running anymore.
+	session, err = mgo.DialWithTimeout(addr, 500*time.Millisecond)
+	if session != nil {
+		session.Close()
+		c.Fatalf("Stop did not stop the server")
+	}
+}
+
+func (s *S) TestCheckSessions(c *C) {
+	var server dbtest.DBServer
+	server.SetPath(c.MkDir())
+	defer server.Stop()
+
+	session := server.Session()
+	defer session.Close()
+	c.Assert(server.Wipe, PanicMatches, "There are mgo sessions still alive.")
+}
+
+func (s *S) TestCheckSessionsDisabled(c *C) {
+	var server dbtest.DBServer
+	server.SetPath(c.MkDir())
+	defer server.Stop()
+
+	os.Setenv("CHECK_SESSIONS", "0")
+
+	// Should not panic, although it looks to Wipe like this session will leak.
+	session := server.Session()
+	defer session.Close()
+	server.Wipe()
+}

+ 12 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/dbtest/export_test.go

@@ -0,0 +1,12 @@
+package dbtest
+
+import (
+	"os"
+)
+
+func (dbs *DBServer) ProcessTest() *os.Process {
+	if dbs.server == nil {
+		return nil
+	}
+	return dbs.server.Process
+}

+ 31 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/doc.go

@@ -0,0 +1,31 @@
+// Package mgo offers a rich MongoDB driver for Go.
+//
+// Details about the mgo project (pronounced as "mango") are found
+// in its web page:
+//
+//     http://labix.org/mgo
+//
+// Usage of the driver revolves around the concept of sessions.  To
+// get started, obtain a session using the Dial function:
+//
+//     session, err := mgo.Dial(url)
+//
+// This will establish one or more connections with the cluster of
+// servers defined by the url parameter.  From then on, the cluster
+// may be queried with multiple consistency rules (see SetMode) and
+// documents retrieved with statements such as:
+//
+//     c := session.DB(database).C(collection)
+//     err := c.Find(query).One(&result)
+//
+// New sessions are typically created by calling session.Copy on the
+// initial session obtained at dial time. These new sessions will share
+// the same cluster information and connection pool, and may be easily
+// handed into other methods and functions for organizing logic.
+// Every session created must have its Close method called at the end
+// of its lifetime, so its resources may be put back in the pool or
+// collected, depending on the case.
+//
+// For more details, see the documentation for the types and methods.
+//
+package mgo
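
For illustration, the snippets in the package documentation above combine into a complete minimal program following the Dial/Copy/Close lifecycle; the address, database, and collection names are placeholders:

    package main

    import (
    	"fmt"
    	"log"

    	"gopkg.in/mgo.v2-unstable"
    	"gopkg.in/mgo.v2-unstable/bson"
    )

    func main() {
    	root, err := mgo.Dial("localhost:27017") // placeholder address
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer root.Close()

    	// Hand a copy to each unit of work; copies share the cluster
    	// topology and socket pool with the root session.
    	session := root.Copy()
    	defer session.Close()

    	c := session.DB("test").C("people")
    	if err := c.Insert(bson.M{"name": "Ada"}); err != nil {
    		log.Fatal(err)
    	}
    	var result struct{ Name string }
    	if err := c.Find(bson.M{"name": "Ada"}).One(&result); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(result.Name)
    }
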

+ 33 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/export_test.go

@@ -0,0 +1,33 @@
+package mgo
+
+import (
+	"time"
+)
+
+func HackPingDelay(newDelay time.Duration) (restore func()) {
+	globalMutex.Lock()
+	defer globalMutex.Unlock()
+
+	oldDelay := pingDelay
+	restore = func() {
+		globalMutex.Lock()
+		pingDelay = oldDelay
+		globalMutex.Unlock()
+	}
+	pingDelay = newDelay
+	return
+}
+
+func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) {
+	globalMutex.Lock()
+	defer globalMutex.Unlock()
+
+	oldTimeout := syncSocketTimeout
+	restore = func() {
+		globalMutex.Lock()
+		syncSocketTimeout = oldTimeout
+		globalMutex.Unlock()
+	}
+	syncSocketTimeout = newTimeout
+	return
+}

+ 761 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/gridfs.go

@@ -0,0 +1,761 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+	"crypto/md5"
+	"encoding/hex"
+	"errors"
+	"hash"
+	"io"
+	"os"
+	"sync"
+	"time"
+
+	"gopkg.in/mgo.v2-unstable/bson"
+)
+
+type GridFS struct {
+	Files  *Collection
+	Chunks *Collection
+}
+
+type gfsFileMode int
+
+const (
+	gfsClosed  gfsFileMode = 0
+	gfsReading gfsFileMode = 1
+	gfsWriting gfsFileMode = 2
+)
+
+type GridFile struct {
+	m    sync.Mutex
+	c    sync.Cond
+	gfs  *GridFS
+	mode gfsFileMode
+	err  error
+
+	chunk  int
+	offset int64
+
+	wpending int
+	wbuf     []byte
+	wsum     hash.Hash
+
+	rbuf   []byte
+	rcache *gfsCachedChunk
+
+	doc gfsFile
+}
+
+type gfsFile struct {
+	Id          interface{} "_id"
+	ChunkSize   int         "chunkSize"
+	UploadDate  time.Time   "uploadDate"
+	Length      int64       ",minsize"
+	MD5         string
+	Filename    string    ",omitempty"
+	ContentType string    "contentType,omitempty"
+	Metadata    *bson.Raw ",omitempty"
+}
+
+type gfsChunk struct {
+	Id      interface{} "_id"
+	FilesId interface{} "files_id"
+	N       int
+	Data    []byte
+}
+
+type gfsCachedChunk struct {
+	wait sync.Mutex
+	n    int
+	data []byte
+	err  error
+}
+
+func newGridFS(db *Database, prefix string) *GridFS {
+	return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")}
+}
+
+func (gfs *GridFS) newFile() *GridFile {
+	file := &GridFile{gfs: gfs}
+	file.c.L = &file.m
+	//runtime.SetFinalizer(file, finalizeFile)
+	return file
+}
+
+func finalizeFile(file *GridFile) {
+	file.Close()
+}
+
+// Create creates a new file with the provided name in the GridFS.  If the file
+// name already exists, a new version will be inserted with an up-to-date
+// uploadDate that will cause it to be atomically visible to the Open and
+// OpenId methods.  If the file name is not important, an empty name may be
+// provided and the file Id used instead.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// A simple example inserting a new file:
+//
+//     func check(err error) {
+//         if err != nil {
+//             panic(err)
+//         }
+//     }
+//     file, err := db.GridFS("fs").Create("myfile.txt")
+//     check(err)
+//     n, err := file.Write([]byte("Hello world!"))
+//     check(err)
+//     err = file.Close()
+//     check(err)
+//     fmt.Printf("%d bytes written\n", n)
+//
+// The io.Writer interface is implemented by *GridFile and may be used to
+// help on the file creation.  For example:
+//
+//     file, err := db.GridFS("fs").Create("myfile.txt")
+//     check(err)
+//     messages, err := os.Open("/var/log/messages")
+//     check(err)
+//     defer messages.Close()
+//     _, err = io.Copy(file, messages)
+//     check(err)
+//     err = file.Close()
+//     check(err)
+//
+func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
+	file = gfs.newFile()
+	file.mode = gfsWriting
+	file.wsum = md5.New()
+	file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name}
+	return
+}
+
+// OpenId returns the file with the provided id, for reading.
+// If the file isn't found, err will be set to mgo.ErrNotFound.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// The following example will print the first 8192 bytes from the file:
+//
+//     func check(err error) {
+//         if err != nil {
+//             panic(err)
+//         }
+//     }
+//     file, err := db.GridFS("fs").OpenId(objid)
+//     check(err)
+//     b := make([]byte, 8192)
+//     n, err := file.Read(b)
+//     check(err)
+//     fmt.Println(string(b))
+//     check(err)
+//     err = file.Close()
+//     check(err)
+//     fmt.Printf("%d bytes read\n", n)
+//
+// The io.Reader interface is implemented by *GridFile and may be used to
+// deal with it.  As an example, the following snippet will dump the whole
+// file into the standard output:
+//
+//     file, err := db.GridFS("fs").OpenId(objid)
+//     check(err)
+//     _, err = io.Copy(os.Stdout, file)
+//     check(err)
+//     err = file.Close()
+//     check(err)
+//
+func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) {
+	var doc gfsFile
+	err = gfs.Files.Find(bson.M{"_id": id}).One(&doc)
+	if err != nil {
+		return
+	}
+	file = gfs.newFile()
+	file.mode = gfsReading
+	file.doc = doc
+	return
+}
+
+// Open returns the most recently uploaded file with the provided
+// name, for reading. If the file isn't found, err will be set
+// to mgo.ErrNotFound.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// The following example will print the first 8192 bytes from the file:
+//
+//     file, err := db.GridFS("fs").Open("myfile.txt")
+//     check(err)
+//     b := make([]byte, 8192)
+//     n, err := file.Read(b)
+//     check(err)
+//     fmt.Println(string(b))
+//     check(err)
+//     err = file.Close()
+//     check(err)
+//     fmt.Printf("%d bytes read\n", n)
+//
+// The io.Reader interface is implemented by *GridFile and may be used to
+// deal with it.  As an example, the following snippet will dump the whole
+// file into the standard output:
+//
+//     file, err := db.GridFS("fs").Open("myfile.txt")
+//     check(err)
+//     _, err = io.Copy(os.Stdout, file)
+//     check(err)
+//     err = file.Close()
+//     check(err)
+//
+func (gfs *GridFS) Open(name string) (file *GridFile, err error) {
+	var doc gfsFile
+	err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc)
+	if err != nil {
+		return
+	}
+	file = gfs.newFile()
+	file.mode = gfsReading
+	file.doc = doc
+	return
+}
+
+// OpenNext opens the next file from iter for reading, sets *file to it,
+// and returns true on success. If no more documents are available
+// on iter or an error occurred, *file is set to nil and the result is false.
+// Errors will be available via iter.Err().
+//
+// The iter parameter must be an iterator on the GridFS files collection.
+// Using the GridFS.Find method is an easy way to obtain such an iterator,
+// but any iterator on the collection will work.
+//
+// If the provided *file is non-nil, OpenNext will close it before attempting
+// to iterate to the next element. This means that in a loop one only
+// has to worry about closing files when breaking out of the loop early
+// (break, return, or panic).
+//
+// For example:
+//
+//     gfs := db.GridFS("fs")
+//     query := gfs.Find(nil).Sort("filename")
+//     iter := query.Iter()
+//     var f *mgo.GridFile
+//     for gfs.OpenNext(iter, &f) {
+//         fmt.Printf("Filename: %s\n", f.Name())
+//     }
+//     if iter.Close() != nil {
+//         panic(iter.Close())
+//     }
+//
+func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool {
+	if *file != nil {
+		// Ignoring the error here shouldn't be a big deal
+		// as we're reading the file and the loop iteration
+		// for this file is finished.
+		_ = (*file).Close()
+	}
+	var doc gfsFile
+	if !iter.Next(&doc) {
+		*file = nil
+		return false
+	}
+	f := gfs.newFile()
+	f.mode = gfsReading
+	f.doc = doc
+	*file = f
+	return true
+}
+
+// Find runs query on GridFS's files collection and returns
+// the resulting Query.
+//
+// This logic:
+//
+//     gfs := db.GridFS("fs")
+//     iter := gfs.Find(nil).Iter()
+//
+// Is equivalent to:
+//
+//     files := db.C("fs" + ".files")
+//     iter := files.Find(nil).Iter()
+//
+func (gfs *GridFS) Find(query interface{}) *Query {
+	return gfs.Files.Find(query)
+}
+
+// RemoveId deletes the file with the provided id from the GridFS.
+func (gfs *GridFS) RemoveId(id interface{}) error {
+	err := gfs.Files.Remove(bson.M{"_id": id})
+	if err != nil {
+		return err
+	}
+	_, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}})
+	return err
+}
+
+type gfsDocId struct {
+	Id interface{} "_id"
+}
+
+// Remove deletes all files with the provided name from the GridFS.
+func (gfs *GridFS) Remove(name string) (err error) {
+	iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter()
+	var doc gfsDocId
+	for iter.Next(&doc) {
+		if e := gfs.RemoveId(doc.Id); e != nil {
+			err = e
+		}
+	}
+	if err == nil {
+		err = iter.Close()
+	}
+	return err
+}
+
+func (file *GridFile) assertMode(mode gfsFileMode) {
+	switch file.mode {
+	case mode:
+		return
+	case gfsWriting:
+		panic("GridFile is open for writing")
+	case gfsReading:
+		panic("GridFile is open for reading")
+	case gfsClosed:
+		panic("GridFile is closed")
+	default:
+		panic("internal error: missing GridFile mode")
+	}
+}
+
+// SetChunkSize sets the size of saved chunks.  Once the file is written to, it
+// will be split into blocks of that size and each block saved into an
+// independent chunk document.  The default chunk size is 255 KB.
+//
+// It is a runtime error to call this function once the file has started
+// being written to.
+func (file *GridFile) SetChunkSize(bytes int) {
+	file.assertMode(gfsWriting)
+	debugf("GridFile %p: setting chunk size to %d", file, bytes)
+	file.m.Lock()
+	file.doc.ChunkSize = bytes
+	file.m.Unlock()
+}
+
+// Id returns the current file Id.
+func (file *GridFile) Id() interface{} {
+	return file.doc.Id
+}
+
+// SetId changes the current file Id.
+//
+// It is a runtime error to call this function once the file has started
+// being written to, or when the file is not open for writing.
+func (file *GridFile) SetId(id interface{}) {
+	file.assertMode(gfsWriting)
+	file.m.Lock()
+	file.doc.Id = id
+	file.m.Unlock()
+}
+
+// Name returns the optional file name.  An empty string will be returned
+// in case it is unset.
+func (file *GridFile) Name() string {
+	return file.doc.Filename
+}
+
+// SetName changes the optional file name.  An empty string may be used to
+// unset it.
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetName(name string) {
+	file.assertMode(gfsWriting)
+	file.m.Lock()
+	file.doc.Filename = name
+	file.m.Unlock()
+}
+
+// ContentType returns the optional file content type.  An empty string will be
+// returned in case it is unset.
+func (file *GridFile) ContentType() string {
+	return file.doc.ContentType
+}
+
+// SetContentType changes the optional file content type.  An empty string may be
+// used to unset it.
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetContentType(ctype string) {
+	file.assertMode(gfsWriting)
+	file.m.Lock()
+	file.doc.ContentType = ctype
+	file.m.Unlock()
+}
+
+// GetMeta unmarshals the optional "metadata" field associated with the
+// file into the result parameter. The meaning of keys under that field
+// is user-defined. For example:
+//
+//     result := struct{ INode int }{}
+//     err = file.GetMeta(&result)
+//     if err != nil {
+//         panic(err)
+//     }
+//     fmt.Printf("inode: %d\n", result.INode)
+//
+func (file *GridFile) GetMeta(result interface{}) (err error) {
+	file.m.Lock()
+	if file.doc.Metadata != nil {
+		err = bson.Unmarshal(file.doc.Metadata.Data, result)
+	}
+	file.m.Unlock()
+	return
+}
+
+// SetMeta changes the optional "metadata" field associated with the
+// file. The meaning of keys under that field is user-defined.
+// For example:
+//
+//     file.SetMeta(bson.M{"inode": inode})
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetMeta(metadata interface{}) {
+	file.assertMode(gfsWriting)
+	data, err := bson.Marshal(metadata)
+	file.m.Lock()
+	if err != nil && file.err == nil {
+		file.err = err
+	} else {
+		file.doc.Metadata = &bson.Raw{Data: data}
+	}
+	file.m.Unlock()
+}
+
+// Size returns the file size in bytes.
+func (file *GridFile) Size() (bytes int64) {
+	file.m.Lock()
+	bytes = file.doc.Length
+	file.m.Unlock()
+	return
+}
+
+// MD5 returns the file MD5 as a hex-encoded string.
+func (file *GridFile) MD5() (md5 string) {
+	return file.doc.MD5
+}
+
+// UploadDate returns the file upload time.
+func (file *GridFile) UploadDate() time.Time {
+	return file.doc.UploadDate
+}
+
+// SetUploadDate changes the file upload time.
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetUploadDate(t time.Time) {
+	file.assertMode(gfsWriting)
+	file.m.Lock()
+	file.doc.UploadDate = t
+	file.m.Unlock()
+}
+
+// Close flushes any pending changes in case the file is being written
+// to, waits for any background operations to finish, and closes the file.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+func (file *GridFile) Close() (err error) {
+	file.m.Lock()
+	defer file.m.Unlock()
+	if file.mode == gfsWriting {
+		if len(file.wbuf) > 0 && file.err == nil {
+			file.insertChunk(file.wbuf)
+			file.wbuf = file.wbuf[0:0]
+		}
+		file.completeWrite()
+	} else if file.mode == gfsReading && file.rcache != nil {
+		file.rcache.wait.Lock()
+		file.rcache = nil
+	}
+	file.mode = gfsClosed
+	debugf("GridFile %p: closed", file)
+	return file.err
+}
+
+func (file *GridFile) completeWrite() {
+	for file.wpending > 0 {
+		debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending)
+		file.c.Wait()
+	}
+	if file.err == nil {
+		hexsum := hex.EncodeToString(file.wsum.Sum(nil))
+		if file.doc.UploadDate.IsZero() {
+			file.doc.UploadDate = bson.Now()
+		}
+		file.doc.MD5 = hexsum
+		file.err = file.gfs.Files.Insert(file.doc)
+	}
+	if file.err != nil {
+		file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}})
+	}
+	if file.err == nil {
+		index := Index{
+			Key:    []string{"files_id", "n"},
+			Unique: true,
+		}
+		file.err = file.gfs.Chunks.EnsureIndex(index)
+	}
+}
+
+// Abort cancels an in-progress write, preventing the file from being
+// automatically created and ensuring previously written chunks are
+// removed when the file is closed.
+//
+// It is a runtime error to call Abort when the file was not opened
+// for writing.
+func (file *GridFile) Abort() {
+	if file.mode != gfsWriting {
+		panic("file.Abort must be called on file opened for writing")
+	}
+	file.err = errors.New("write aborted")
+}
+
+// Write writes the provided data to the file and returns the
+// number of bytes written and an error, if any.
+//
+// The file will internally cache the data so that all but the last
+// chunk sent to the database have the size defined by SetChunkSize.
+// This also means that errors may be deferred until a future call
+// to Write or Close.
+//
+// The parameters and behavior of this function turn the file
+// into an io.Writer.
+func (file *GridFile) Write(data []byte) (n int, err error) {
+	file.assertMode(gfsWriting)
+	file.m.Lock()
+	debugf("GridFile %p: writing %d bytes", file, len(data))
+	defer file.m.Unlock()
+
+	if file.err != nil {
+		return 0, file.err
+	}
+
+	n = len(data)
+	file.doc.Length += int64(n)
+	chunkSize := file.doc.ChunkSize
+
+	if len(file.wbuf)+len(data) < chunkSize {
+		file.wbuf = append(file.wbuf, data...)
+		return
+	}
+
+	// First, flush file.wbuf, completing it with bytes from data.
+	if len(file.wbuf) > 0 {
+		missing := chunkSize - len(file.wbuf)
+		if missing > len(data) {
+			missing = len(data)
+		}
+		file.wbuf = append(file.wbuf, data[:missing]...)
+		data = data[missing:]
+		file.insertChunk(file.wbuf)
+		file.wbuf = file.wbuf[0:0]
+	}
+
+	// Then, flush all chunks from data without copying.
+	for len(data) > chunkSize {
+		size := chunkSize
+		if size > len(data) {
+			size = len(data)
+		}
+		file.insertChunk(data[:size])
+		data = data[size:]
+	}
+
+	// And append the rest for a future call.
+	file.wbuf = append(file.wbuf, data...)
+
+	return n, file.err
+}
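
Because Write gives GridFile io.Writer semantics, standard library helpers compose
with it directly. Below is a hedged sketch of streaming a local file into GridFS;
`gfs` is assumed to be an open *mgo.GridFS and the source path is hypothetical. Since
errors are buffered internally, checking the error from Close is mandatory:

    // Hypothetical sketch: io.Copy into a GridFS file, aborting on failure.
    src, err := os.Open("/tmp/input.bin") // illustrative path
    if err != nil {
        panic(err)
    }
    defer src.Close()

    dst, err := gfs.Create("input.bin")
    if err != nil {
        panic(err)
    }
    if _, err := io.Copy(dst, src); err != nil {
        dst.Abort() // drop the chunks written so far
        dst.Close()
        panic(err)
    }
    if err := dst.Close(); err != nil { // flushes the final chunk
        panic(err)
    }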
+
+func (file *GridFile) insertChunk(data []byte) {
+	n := file.chunk
+	file.chunk++
+	debugf("GridFile %p: adding to checksum: %q", file, string(data))
+	file.wsum.Write(data)
+
+	for file.doc.ChunkSize*file.wpending >= 1024*1024 {
+		// Hold on.. we've got a megabyte of chunks pending.
+		file.c.Wait()
+		if file.err != nil {
+			return
+		}
+	}
+
+	file.wpending++
+
+	debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))
+
+	// We may not own the memory of data, so rather than
+	// simply copying it, we'll marshal the document ahead of time.
+	data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
+	if err != nil {
+		file.err = err
+		return
+	}
+
+	go func() {
+		err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
+		file.m.Lock()
+		file.wpending--
+		if err != nil && file.err == nil {
+			file.err = err
+		}
+		file.c.Broadcast()
+		file.m.Unlock()
+	}()
+}
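
insertChunk is also a compact example of bounded asynchronous writes: a mutex-guarded
pending counter plus a sync.Cond caps in-flight chunk inserts at roughly one megabyte.
A standalone hedged sketch of the same pattern, detached from mgo (every name below is
invented for illustration):

    // Hypothetical sketch of the backpressure pattern above: at most
    // `limit` units of work may be in flight at once.
    type bounded struct {
        mu      sync.Mutex
        cond    *sync.Cond
        pending int
        limit   int
    }

    func newBounded(limit int) *bounded {
        b := &bounded{limit: limit}
        b.cond = sync.NewCond(&b.mu)
        return b
    }

    func (b *bounded) submit(n int, work func()) {
        b.mu.Lock()
        for b.pending+n > b.limit {
            b.cond.Wait() // mirrors file.c.Wait()
        }
        b.pending += n
        b.mu.Unlock()
        go func() {
            work()
            b.mu.Lock()
            b.pending -= n
            b.cond.Broadcast() // mirrors file.c.Broadcast()
            b.mu.Unlock()
        }()
    }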
+
+// Seek sets the offset for the next Read or Write on file to
+// offset, interpreted according to whence: 0 means relative to
+// the origin of the file, 1 means relative to the current offset,
+// and 2 means relative to the end. It returns the new offset and
+// an error, if any.
+func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
+	file.m.Lock()
+	debugf("GridFile %p: seeking for %s (whence=%d)", file, offset, whence)
+	defer file.m.Unlock()
+	switch whence {
+	case os.SEEK_SET:
+	case os.SEEK_CUR:
+		offset += file.offset
+	case os.SEEK_END:
+		offset += file.doc.Length
+	default:
+		panic("unsupported whence value")
+	}
+	if offset > file.doc.Length {
+		return file.offset, errors.New("seek past end of file")
+	}
+	if offset == file.doc.Length {
+		// If we're seeking to the end of the file,
+		// no need to read anything. This enables
+		// a client to find the size of the file using only the
+		// io.ReadSeeker interface with low overhead.
+		file.offset = offset
+		return file.offset, nil
+	}
+	chunk := int(offset / int64(file.doc.ChunkSize))
+	if chunk+1 == file.chunk && offset >= file.offset {
+		file.rbuf = file.rbuf[int(offset-file.offset):]
+		file.offset = offset
+		return file.offset, nil
+	}
+	file.offset = offset
+	file.chunk = chunk
+	file.rbuf = nil
+	file.rbuf, err = file.getChunk()
+	if err == nil {
+		file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):]
+	}
+	return file.offset, err
+}
+
+// Read reads into b the next available data from the file and
+// returns the number of bytes read and an error, if any.  At the
+// end of the file, n will be zero and err will be set to io.EOF.
+//
+// The parameters and behavior of this function turn the file
+// into an io.Reader.
+func (file *GridFile) Read(b []byte) (n int, err error) {
+	file.assertMode(gfsReading)
+	file.m.Lock()
+	debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b))
+	defer file.m.Unlock()
+	if file.offset == file.doc.Length {
+		return 0, io.EOF
+	}
+	for err == nil {
+		i := copy(b, file.rbuf)
+		n += i
+		file.offset += int64(i)
+		file.rbuf = file.rbuf[i:]
+		if i == len(b) || file.offset == file.doc.Length {
+			break
+		}
+		b = b[i:]
+		file.rbuf, err = file.getChunk()
+	}
+	return n, err
+}
+
+func (file *GridFile) getChunk() (data []byte, err error) {
+	cache := file.rcache
+	file.rcache = nil
+	if cache != nil && cache.n == file.chunk {
+		debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk)
+		cache.wait.Lock()
+		data, err = cache.data, cache.err
+	} else {
+		debugf("GridFile %p: Fetching chunk %d", file, file.chunk)
+		var doc gfsChunk
+		err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc)
+		data = doc.Data
+	}
+	file.chunk++
+	if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length {
+		// Read the next one in background.
+		cache = &gfsCachedChunk{n: file.chunk}
+		cache.wait.Lock()
+		debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk)
+		// Clone the session to avoid having it closed in between.
+		chunks := file.gfs.Chunks
+		session := chunks.Database.Session.Clone()
+		go func(id interface{}, n int) {
+			defer session.Close()
+			chunks = chunks.With(session)
+			var doc gfsChunk
+			cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc)
+			cache.data = doc.Data
+			cache.wait.Unlock()
+		}(file.doc.Id, file.chunk)
+		file.rcache = cache
+	}
+	debugf("Returning err: %#v", err)
+	return
+}
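
The Seek fast path above means a caller holding only an io.ReadSeeker can learn a
file's size without fetching a single chunk, and a subsequent sequential read then
benefits from getChunk's background prefetch. A hedged sketch, again assuming `gfs`
is an open *mgo.GridFS and the filename is illustrative:

    // Hypothetical sketch: size via Seek, then a sequential streamed read.
    f, err := gfs.Open("input.bin")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    size, err := f.Seek(0, os.SEEK_END) // no chunk fetch happens here
    if err != nil {
        panic(err)
    }
    if _, err := f.Seek(0, os.SEEK_SET); err != nil {
        panic(err)
    }
    fmt.Printf("streaming %d bytes\n", size)
    if _, err := io.Copy(os.Stdout, f); err != nil {
        panic(err)
    }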

+ 708 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/gridfs_test.go

@@ -0,0 +1,708 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo_test
+
+import (
+	"io"
+	"os"
+	"time"
+
+	. "gopkg.in/check.v1"
+	"gopkg.in/mgo.v2-unstable"
+	"gopkg.in/mgo.v2-unstable/bson"
+)
+
+func (s *S) TestGridFSCreate(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	before := bson.Now()
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+
+	n, err := file.Write([]byte("some data"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 9)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	after := bson.Now()
+
+	// Check the file information.
+	result := M{}
+	err = db.C("fs.files").Find(nil).One(result)
+	c.Assert(err, IsNil)
+
+	fileId, ok := result["_id"].(bson.ObjectId)
+	c.Assert(ok, Equals, true)
+	c.Assert(fileId.Valid(), Equals, true)
+	result["_id"] = "<id>"
+
+	ud, ok := result["uploadDate"].(time.Time)
+	c.Assert(ok, Equals, true)
+	c.Assert(ud.After(before) && ud.Before(after), Equals, true)
+	result["uploadDate"] = "<timestamp>"
+
+	expected := M{
+		"_id":        "<id>",
+		"length":     9,
+		"chunkSize":  255 * 1024,
+		"uploadDate": "<timestamp>",
+		"md5":        "1e50210a0202497fb79bc38b6ade6c34",
+	}
+	c.Assert(result, DeepEquals, expected)
+
+	// Check the chunk.
+	result = M{}
+	err = db.C("fs.chunks").Find(nil).One(result)
+	c.Assert(err, IsNil)
+
+	chunkId, ok := result["_id"].(bson.ObjectId)
+	c.Assert(ok, Equals, true)
+	c.Assert(chunkId.Valid(), Equals, true)
+	result["_id"] = "<id>"
+
+	expected = M{
+		"_id":      "<id>",
+		"files_id": fileId,
+		"n":        0,
+		"data":     []byte("some data"),
+	}
+	c.Assert(result, DeepEquals, expected)
+
+	// Check that an index was created.
+	indexes, err := db.C("fs.chunks").Indexes()
+	c.Assert(err, IsNil)
+	c.Assert(len(indexes), Equals, 2)
+	c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"})
+}
+
+func (s *S) TestGridFSFileDetails(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("myfile1.txt")
+	c.Assert(err, IsNil)
+
+	n, err := file.Write([]byte("some"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 4)
+
+	c.Assert(file.Size(), Equals, int64(4))
+
+	n, err = file.Write([]byte(" data"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 5)
+
+	c.Assert(file.Size(), Equals, int64(9))
+
+	id, _ := file.Id().(bson.ObjectId)
+	c.Assert(id.Valid(), Equals, true)
+	c.Assert(file.Name(), Equals, "myfile1.txt")
+	c.Assert(file.ContentType(), Equals, "")
+
+	var info interface{}
+	err = file.GetMeta(&info)
+	c.Assert(err, IsNil)
+	c.Assert(info, IsNil)
+
+	file.SetId("myid")
+	file.SetName("myfile2.txt")
+	file.SetContentType("text/plain")
+	file.SetMeta(M{"any": "thing"})
+
+	c.Assert(file.Id(), Equals, "myid")
+	c.Assert(file.Name(), Equals, "myfile2.txt")
+	c.Assert(file.ContentType(), Equals, "text/plain")
+
+	err = file.GetMeta(&info)
+	c.Assert(err, IsNil)
+	c.Assert(info, DeepEquals, bson.M{"any": "thing"})
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34")
+
+	ud := file.UploadDate()
+	now := time.Now()
+	c.Assert(ud.Before(now), Equals, true)
+	c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true)
+
+	result := M{}
+	err = db.C("fs.files").Find(nil).One(result)
+	c.Assert(err, IsNil)
+
+	result["uploadDate"] = "<timestamp>"
+
+	expected := M{
+		"_id":         "myid",
+		"length":      9,
+		"chunkSize":   255 * 1024,
+		"uploadDate":  "<timestamp>",
+		"md5":         "1e50210a0202497fb79bc38b6ade6c34",
+		"filename":    "myfile2.txt",
+		"contentType": "text/plain",
+		"metadata":    M{"any": "thing"},
+	}
+	c.Assert(result, DeepEquals, expected)
+}
+
+func (s *S) TestGridFSSetUploadDate(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+
+	t := time.Date(2014, 1, 1, 1, 1, 1, 0, time.Local)
+	file.SetUploadDate(t)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	// Check the file information.
+	result := M{}
+	err = db.C("fs.files").Find(nil).One(result)
+	c.Assert(err, IsNil)
+
+	ud := result["uploadDate"].(time.Time)
+	if !ud.Equal(t) {
+		c.Fatalf("want upload date %s, got %s", t, ud)
+	}
+}
+
+func (s *S) TestGridFSCreateWithChunking(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+
+	file.SetChunkSize(5)
+
+	// Smaller than the chunk size.
+	n, err := file.Write([]byte("abc"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 3)
+
+	// Boundary in the middle.
+	n, err = file.Write([]byte("defg"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 4)
+
+	// Boundary at the end.
+	n, err = file.Write([]byte("hij"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 3)
+
+	// Larger than the chunk size, with 3 chunks.
+	n, err = file.Write([]byte("klmnopqrstuv"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 12)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	// Check the file information.
+	result := M{}
+	err = db.C("fs.files").Find(nil).One(result)
+	c.Assert(err, IsNil)
+
+	fileId, _ := result["_id"].(bson.ObjectId)
+	c.Assert(fileId.Valid(), Equals, true)
+	result["_id"] = "<id>"
+	result["uploadDate"] = "<timestamp>"
+
+	expected := M{
+		"_id":        "<id>",
+		"length":     22,
+		"chunkSize":  5,
+		"uploadDate": "<timestamp>",
+		"md5":        "44a66044834cbe55040089cabfc102d5",
+	}
+	c.Assert(result, DeepEquals, expected)
+
+	// Check the chunks.
+	iter := db.C("fs.chunks").Find(nil).Sort("n").Iter()
+	dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"}
+	for i := 0; ; i++ {
+		result = M{}
+		if !iter.Next(result) {
+			if i != 5 {
+				c.Fatalf("Expected 5 chunks, got %d", i)
+			}
+			break
+		}
+		c.Assert(iter.Close(), IsNil)
+
+		result["_id"] = "<id>"
+
+		expected = M{
+			"_id":      "<id>",
+			"files_id": fileId,
+			"n":        i,
+			"data":     []byte(dataChunks[i]),
+		}
+		c.Assert(result, DeepEquals, expected)
+	}
+}
+
+func (s *S) TestGridFSAbort(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+
+	file.SetChunkSize(5)
+
+	n, err := file.Write([]byte("some data"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 9)
+
+	var count int
+	for i := 0; i < 10; i++ {
+		count, err = db.C("fs.chunks").Count()
+		if count > 0 || err != nil {
+			break
+		}
+	}
+	c.Assert(err, IsNil)
+	c.Assert(count, Equals, 1)
+
+	file.Abort()
+
+	err = file.Close()
+	c.Assert(err, ErrorMatches, "write aborted")
+
+	count, err = db.C("fs.chunks").Count()
+	c.Assert(err, IsNil)
+	c.Assert(count, Equals, 0)
+}
+
+func (s *S) TestGridFSCloseConflict(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	db.C("fs.files").EnsureIndex(mgo.Index{Key: []string{"filename"}, Unique: true})
+
+	// Insert a conflicting filename up front to force a duplicate-key error at Close.
+	err = db.C("fs.files").Insert(M{"filename": "foo.txt"})
+	c.Assert(err, IsNil)
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.Create("foo.txt")
+	c.Assert(err, IsNil)
+
+	_, err = file.Write([]byte("some data"))
+	c.Assert(err, IsNil)
+
+	err = file.Close()
+	c.Assert(mgo.IsDup(err), Equals, true)
+
+	count, err := db.C("fs.chunks").Count()
+	c.Assert(err, IsNil)
+	c.Assert(count, Equals, 0)
+}
+
+func (s *S) TestGridFSOpenNotFound(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.OpenId("non-existent")
+	c.Assert(err == mgo.ErrNotFound, Equals, true)
+	c.Assert(file, IsNil)
+
+	file, err = gfs.Open("non-existent")
+	c.Assert(err == mgo.ErrNotFound, Equals, true)
+	c.Assert(file, IsNil)
+}
+
+func (s *S) TestGridFSReadAll(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+	id := file.Id()
+
+	file.SetChunkSize(5)
+
+	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 22)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	file, err = gfs.OpenId(id)
+	c.Assert(err, IsNil)
+
+	b := make([]byte, 30)
+	n, err = file.Read(b)
+	c.Assert(n, Equals, 22)
+	c.Assert(err, IsNil)
+
+	n, err = file.Read(b)
+	c.Assert(n, Equals, 0)
+	c.Assert(err == io.EOF, Equals, true)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+}
+
+func (s *S) TestGridFSReadChunking(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+
+	id := file.Id()
+
+	file.SetChunkSize(5)
+
+	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 22)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	file, err = gfs.OpenId(id)
+	c.Assert(err, IsNil)
+
+	b := make([]byte, 30)
+
+	// Smaller than the chunk size.
+	n, err = file.Read(b[:3])
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 3)
+	c.Assert(b[:3], DeepEquals, []byte("abc"))
+
+	// Boundary in the middle.
+	n, err = file.Read(b[:4])
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 4)
+	c.Assert(b[:4], DeepEquals, []byte("defg"))
+
+	// Boundary at the end.
+	n, err = file.Read(b[:3])
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 3)
+	c.Assert(b[:3], DeepEquals, []byte("hij"))
+
+	// Larger than the chunk size, with 3 chunks.
+	n, err = file.Read(b)
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 12)
+	c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv"))
+
+	n, err = file.Read(b)
+	c.Assert(n, Equals, 0)
+	c.Assert(err == io.EOF, Equals, true)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+}
+
+func (s *S) TestGridFSOpen(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("myfile.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'1'})
+	file.Close()
+
+	file, err = gfs.Create("myfile.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'2'})
+	file.Close()
+
+	file, err = gfs.Open("myfile.txt")
+	c.Assert(err, IsNil)
+	defer file.Close()
+
+	var b [1]byte
+
+	_, err = file.Read(b[:])
+	c.Assert(err, IsNil)
+	c.Assert(string(b[:]), Equals, "2")
+}
+
+func (s *S) TestGridFSSeek(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+	file, err := gfs.Create("")
+	c.Assert(err, IsNil)
+	id := file.Id()
+
+	file.SetChunkSize(5)
+
+	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 22)
+
+	err = file.Close()
+	c.Assert(err, IsNil)
+
+	b := make([]byte, 5)
+
+	file, err = gfs.OpenId(id)
+	c.Assert(err, IsNil)
+
+	o, err := file.Seek(3, os.SEEK_SET)
+	c.Assert(err, IsNil)
+	c.Assert(o, Equals, int64(3))
+	_, err = file.Read(b)
+	c.Assert(err, IsNil)
+	c.Assert(b, DeepEquals, []byte("defgh"))
+
+	o, err = file.Seek(5, os.SEEK_CUR)
+	c.Assert(err, IsNil)
+	c.Assert(o, Equals, int64(13))
+	_, err = file.Read(b)
+	c.Assert(err, IsNil)
+	c.Assert(b, DeepEquals, []byte("nopqr"))
+
+	o, err = file.Seek(0, os.SEEK_END)
+	c.Assert(err, IsNil)
+	c.Assert(o, Equals, int64(22))
+	n, err = file.Read(b)
+	c.Assert(err, Equals, io.EOF)
+	c.Assert(n, Equals, 0)
+
+	o, err = file.Seek(-10, os.SEEK_END)
+	c.Assert(err, IsNil)
+	c.Assert(o, Equals, int64(12))
+	_, err = file.Read(b)
+	c.Assert(err, IsNil)
+	c.Assert(b, DeepEquals, []byte("mnopq"))
+
+	o, err = file.Seek(8, os.SEEK_SET)
+	c.Assert(err, IsNil)
+	c.Assert(o, Equals, int64(8))
+	_, err = file.Read(b)
+	c.Assert(err, IsNil)
+	c.Assert(b, DeepEquals, []byte("ijklm"))
+
+	// Trivial seek forward within same chunk. Already
+	// got the data, shouldn't touch the database.
+	sent := mgo.GetStats().SentOps
+	o, err = file.Seek(1, os.SEEK_CUR)
+	c.Assert(err, IsNil)
+	c.Assert(o, Equals, int64(14))
+	c.Assert(mgo.GetStats().SentOps, Equals, sent)
+	_, err = file.Read(b)
+	c.Assert(err, IsNil)
+	c.Assert(b, DeepEquals, []byte("opqrs"))
+
+	// Try seeking past end of file.
+	file.Seek(3, os.SEEK_SET)
+	o, err = file.Seek(23, os.SEEK_SET)
+	c.Assert(err, ErrorMatches, "seek past end of file")
+	c.Assert(o, Equals, int64(3))
+}
+
+func (s *S) TestGridFSRemoveId(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("myfile.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'1'})
+	file.Close()
+
+	file, err = gfs.Create("myfile.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'2'})
+	id := file.Id()
+	file.Close()
+
+	err = gfs.RemoveId(id)
+	c.Assert(err, IsNil)
+
+	file, err = gfs.Open("myfile.txt")
+	c.Assert(err, IsNil)
+	defer file.Close()
+
+	var b [1]byte
+
+	_, err = file.Read(b[:])
+	c.Assert(err, IsNil)
+	c.Assert(string(b[:]), Equals, "1")
+
+	n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count()
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 0)
+}
+
+func (s *S) TestGridFSRemove(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("myfile.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'1'})
+	file.Close()
+
+	file, err = gfs.Create("myfile.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'2'})
+	file.Close()
+
+	err = gfs.Remove("myfile.txt")
+	c.Assert(err, IsNil)
+
+	_, err = gfs.Open("myfile.txt")
+	c.Assert(err == mgo.ErrNotFound, Equals, true)
+
+	n, err := db.C("fs.chunks").Find(nil).Count()
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 0)
+}
+
+func (s *S) TestGridFSOpenNext(c *C) {
+	session, err := mgo.Dial("localhost:40011")
+	c.Assert(err, IsNil)
+	defer session.Close()
+
+	db := session.DB("mydb")
+
+	gfs := db.GridFS("fs")
+
+	file, err := gfs.Create("myfile1.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'1'})
+	file.Close()
+
+	file, err = gfs.Create("myfile2.txt")
+	c.Assert(err, IsNil)
+	file.Write([]byte{'2'})
+	file.Close()
+
+	var f *mgo.GridFile
+	var b [1]byte
+
+	iter := gfs.Find(nil).Sort("-filename").Iter()
+
+	ok := gfs.OpenNext(iter, &f)
+	c.Assert(ok, Equals, true)
+	c.Check(f.Name(), Equals, "myfile2.txt")
+
+	_, err = f.Read(b[:])
+	c.Assert(err, IsNil)
+	c.Assert(string(b[:]), Equals, "2")
+
+	ok = gfs.OpenNext(iter, &f)
+	c.Assert(ok, Equals, true)
+	c.Check(f.Name(), Equals, "myfile1.txt")
+
+	_, err = f.Read(b[:])
+	c.Assert(err, IsNil)
+	c.Assert(string(b[:]), Equals, "1")
+
+	ok = gfs.OpenNext(iter, &f)
+	c.Assert(ok, Equals, false)
+	c.Assert(iter.Close(), IsNil)
+	c.Assert(f, IsNil)
+
+	// Do it again with a more restrictive query to make sure
+	// it's actually taken into account.
+	iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter()
+
+	ok = gfs.OpenNext(iter, &f)
+	c.Assert(ok, Equals, true)
+	c.Check(f.Name(), Equals, "myfile1.txt")
+
+	ok = gfs.OpenNext(iter, &f)
+	c.Assert(ok, Equals, false)
+	c.Assert(iter.Close(), IsNil)
+	c.Assert(f, IsNil)
+}

+ 20 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/client.crt

@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV
+BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl
+cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw
+OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH
+DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls
+b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H
+4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ
+616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I
+AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd
+7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO
+Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx
+l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5
+CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW
+DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47
+PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR
+OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI
+/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r
+z3A=
+-----END CERTIFICATE-----

+ 27 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/client.key

@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7
+wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ
+r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ
+Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI
+KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5
+Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu
+La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq
+KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv
+bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f
+Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA
+Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp
+QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo
+DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl
+QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F
+Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ
++HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F
+jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB
+K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy
+HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP
+Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E
+xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB
+28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z
+ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ
+4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo
+I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk=
+-----END RSA PRIVATE KEY-----

+ 57 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/client.pem

@@ -0,0 +1,57 @@
+To regenerate the key:
+
+   openssl req -newkey rsa:2048 -new -x509 -days 36500 -nodes -out server.crt -keyout server.key
+   cat server.key server.crt > server.pem
+   openssl genrsa -out client.key 2048
+   openssl req -key client.key -new -out client.req
+   openssl x509 -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.crt
+   cat client.key client.crt > client.pem
+
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7
+wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ
+r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ
+Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI
+KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5
+Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu
+La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq
+KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv
+bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f
+Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA
+Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp
+QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo
+DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl
+QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F
+Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ
++HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F
+jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB
+K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy
+HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP
+Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E
+xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB
+28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z
+ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ
+4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo
+I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk=
+-----END RSA PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV
+BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl
+cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw
+OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH
+DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls
+b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H
+4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ
+616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I
+AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd
+7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO
+Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx
+l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5
+CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW
+DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47
+PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR
+OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI
+/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r
+z3A=
+-----END CERTIFICATE-----
+

+ 17 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/client.req

@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICoTCCAYkCAQAwXDELMAkGA1UEBhMCR08xDDAKBgNVBAgMA01HTzEMMAoGA1UE
+BwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBkNsaWVudDESMBAGA1UEAwwJ
+bG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtFIkIZk/
+h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7wQidZwLul+cyDfPRDzzo3za4
+GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJr4f/tItg0riOEBbLslQDzNTt
+CAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJQ6DYEQgCa2BTIWq0Uw3WO20M
+3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AIKBhAZwa7vND0RaRYqpO9kyZF
+zh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5Hx+ftNTXnl/69TnxG44BP8M8
+8ZfDWlpzwpsTXwIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAKbOFblIscxlXalV
+sEGNm2oz380RN2QoLhN6nKtAiv0jWm6iKhdAhOIQIeaRPhUP3cyi8bcBvLdMeQ3d
+ZYIByB55/R0VSP1vs4qkXJCQegHcpMpyuIzsMV8p3Q4lxzGKyKtPA6Bb5c49p8Sk
+ncD+LL4ymrMEia4cBPsHL9hhFOm4gqDacbU8+ETLTpuoSvUZiw7OwngqhE2r+kMv
+KDweq5TOPeb+ftKzQKrrfB+XVdBoTKYw6CwARpogbc0/7mvottVcJ/0yAgC1fBbM
+vupkohkXwKfjxKl6nKNL3R2GkzHQOh91hglAx5zyybKQn2YMM328Vk4X6csBg+pg
+tb1s0MA=
+-----END CERTIFICATE REQUEST-----

+ 22 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/server.crt

@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
+BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP
+MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw
+ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM
+A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl
+cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm
+6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK
+IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5
+GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji
+fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP
+JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd
+OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu
+2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG
+TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw
+nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s
+UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C
+W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL
+yQ==
+-----END CERTIFICATE-----

+ 28 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/server.key

@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB
+Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk
+mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi
+xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb
+YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R
+ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs
+uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9
+wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu
+MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi
+wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby
+yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk
+eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3
+ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC
+tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB
+xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6
+MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9
+Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3
+IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q
+Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl
+QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z
+GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do
+4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1
+ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7
+1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt
+9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk
+SruEA1+5bfBRMW0P+h7Qfe4=
+-----END PRIVATE KEY-----

+ 50 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/certs/server.pem

@@ -0,0 +1,50 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB
+Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk
+mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi
+xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb
+YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R
+ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs
+uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9
+wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu
+MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi
+wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby
+yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk
+eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3
+ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC
+tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB
+xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6
+MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9
+Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3
+IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q
+Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl
+QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z
+GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do
+4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1
+ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7
+1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt
+9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk
+SruEA1+5bfBRMW0P+h7Qfe4=
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
+BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP
+MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw
+ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM
+A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl
+cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm
+6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK
+IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5
+GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji
+fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP
+JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd
+OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu
+2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG
+TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw
+nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s
+UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C
+W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL
+yQ==
+-----END CERTIFICATE-----

+ 57 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/.env

@@ -0,0 +1,57 @@
+
+set -e
+
+MONGOVERSION=$(mongod --version | sed -n 's/.*v\([0-9]\+\.[0-9]\+\)\..*/\1/p')
+MONGOMAJOR=$(echo $MONGOVERSION | sed 's/\([0-9]\+\)\..*/\1/')
+MONGOMINOR=$(echo $MONGOVERSION | sed 's/[0-9]\+\.\([0-9]\+\)/\1/')
+
+versionAtLeast() {
+	TESTMAJOR="$1"
+	TESTMINOR="$2"
+	if [ "$MONGOMAJOR" -gt "$TESTMAJOR" ]; then
+		return 0
+	fi
+	if [ "$MONGOMAJOR" -lt "$TESTMAJOR" ]; then
+		return 100
+	fi
+	if [ "$MONGOMINOR" -ge "$TESTMINOR" ]; then
+		return 0
+	fi
+	return 100
+}
+
+COMMONDOPTSNOIP="
+	--nohttpinterface
+	--noprealloc
+	--nojournal
+	--smallfiles
+	--nssize=1
+	--oplogSize=1
+	--dbpath ./db
+	"
+COMMONDOPTS="
+	$COMMONDOPTSNOIP
+	--bind_ip=127.0.0.1
+	"
+COMMONCOPTS="
+	$COMMONDOPTS
+	"
+COMMONSOPTS="
+	--chunkSize 1
+	--bind_ip=127.0.0.1
+	"
+
+if versionAtLeast 3 2; then
+	# 3.2 doesn't like --nojournal on config servers.
+	#COMMONCOPTS="$(echo "$COMMONCOPTS" | sed '/--nojournal/d')"
+	# Using a hacked version of MongoDB 3.2 for now.
+
+	# Go back to MMAPv1 so it's not super sluggish. :-(
+	COMMONDOPTSNOIP="--storageEngine=mmapv1 $COMMONDOPTSNOIP"
+	COMMONDOPTS="--storageEngine=mmapv1 $COMMONDOPTS"
+	COMMONCOPTS="--storageEngine=mmapv1 $COMMONCOPTS"
+fi
+
+if [ "$TRAVIS" = true ]; then
+	set -x
+fi

+ 0 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg1/db/.empty


BIN
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg1/db/journal/tempLatencyTest


+ 0 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg1/db/mongod.lock


+ 3 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg1/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 8 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg1/run

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONCOPTS \
+	--port 40101 \
+	--configsvr
+

+ 0 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg2/db/.empty


+ 3 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg2/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 8 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg2/run

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONCOPTS \
+	--port 40102 \
+	--configsvr
+

+ 0 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg3/db/.empty


+ 3 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg3/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 9 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/cfg3/run

@@ -0,0 +1,9 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONCOPTS \
+	--port 40103 \
+	--configsvr \
+	--auth \
+	--keyFile=../../certs/keyfile

+ 0 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db1/db/.empty


+ 3 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db1/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 15 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db1/run

@@ -0,0 +1,15 @@
+#!/bin/sh
+
+. ../.env
+
+if [ x$NOIPV6 = x1 ]; then
+	BINDIP="127.0.0.1"
+else
+	BINDIP="127.0.0.1,::1"
+fi
+
+exec mongod $COMMONDOPTSNOIP \
+	--shardsvr \
+	--bind_ip=$BINDIP \
+	--port 40001 \
+	--ipv6

+ 0 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db2/db/.empty


+ 3 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db2/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 8 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db2/run

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+	--shardsvr \
+	--port 40002 \
+	--auth

+ 0 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db3/db/.empty


+ 3 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db3/log/run

@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt

+ 0 - 0
backend/src/vendor/gopkg.in/mgo.v2-unstable/harness/daemons/db3/run


Too many files changed in this changeset, so some files are not shown.