Documentation ¶
Overview ¶
The mgo ("mango") rich MongoDB driver for Go.
The mgo project (pronounced as "mango") is a rich MongoDB driver for the Go language. High-level details about the project may be found at its web page:
http://labix.org/mgo
Usage of the driver revolves around the concept of sessions. To get started, obtain a session using the Dial function:
session, err := mgo.Dial(url)
This will establish one or more connections with the cluster of servers defined by the url parameter. From then on, the cluster may be queried with multiple consistency rules (see SetMode) and documents retrieved with statements such as:
c := session.DB(database).C(collection)
err := c.Find(query).One(&result)
New sessions may be created by calling New, Copy, or Clone on an initial session. These spawned sessions will share the same cluster information and connection cache, and may be easily handed into other methods and functions for organizing logic. Every session created must have its Close method called at the end of its use.
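As a minimal sketch of that life cycle (the "test" database, "people" collection, and document fields below are only illustrative):

session, err := mgo.Dial("localhost")
if err != nil {
    panic(err)
}
defer session.Close()

// Each worker gets its own copy, sharing the cluster information
// and connection cache of the original session.
worker := session.Copy()
defer worker.Close()

err = worker.DB("test").C("people").Insert(bson.M{"name": "Ada"})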
For more details, see the documentation for the types and methods.
Index ¶
- Constants
- Variables
- func IsDup(err error) bool
- func ResetStats()
- func SetDebug(debug bool)
- func SetLogger(logger log_Logger)
- func SetStats(enabled bool)
- type BuildInfo
- type Change
- type ChangeInfo
- type Collection
- func (c *Collection) Count() (n int, err error)
- func (c *Collection) Create(info *CollectionInfo) error
- func (c *Collection) DropCollection() error
- func (c *Collection) DropIndex(key ...string) error
- func (c *Collection) EnsureIndex(index Index) error
- func (c *Collection) EnsureIndexKey(key ...string) error
- func (c *Collection) Find(query interface{}) *Query
- func (c *Collection) FindId(id interface{}) *Query
- func (c *Collection) Indexes() (indexes []Index, err error)
- func (c *Collection) Insert(docs ...interface{}) error
- func (c *Collection) Pipe(pipeline interface{}) *Pipe
- func (c *Collection) Remove(selector interface{}) error
- func (c *Collection) RemoveAll(selector interface{}) (info *ChangeInfo, err error)
- func (c *Collection) RemoveId(id interface{}) error
- func (c *Collection) Update(selector interface{}, change interface{}) error
- func (c *Collection) UpdateAll(selector interface{}, change interface{}) (info *ChangeInfo, err error)
- func (c *Collection) UpdateId(id interface{}, change interface{}) error
- func (c *Collection) Upsert(selector interface{}, change interface{}) (info *ChangeInfo, err error)
- func (c *Collection) UpsertId(id interface{}, change interface{}) (info *ChangeInfo, err error)
- func (c *Collection) With(s *Session) *Collection
- type CollectionInfo
- type DBRef
- type Database
- func (db *Database) AddUser(user, pass string, readOnly bool) error
- func (db *Database) C(name string) *Collection
- func (db *Database) CollectionNames() (names []string, err error)
- func (db *Database) DropDatabase() error
- func (db *Database) FindRef(ref *DBRef) *Query
- func (db *Database) GridFS(prefix string) *GridFS
- func (db *Database) Login(user, pass string) (err error)
- func (db *Database) Logout()
- func (db *Database) RemoveUser(user string) error
- func (db *Database) Run(cmd interface{}, result interface{}) error
- func (db *Database) UpsertUser(user *User) error
- func (db *Database) With(s *Session) *Database
- type DialInfo
- type GridFS
- func (gfs *GridFS) Create(name string) (file *GridFile, err error)
- func (gfs *GridFS) Find(query interface{}) *Query
- func (gfs *GridFS) Open(name string) (file *GridFile, err error)
- func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error)
- func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool
- func (gfs *GridFS) Remove(name string) (err error)
- func (gfs *GridFS) RemoveId(id interface{}) error
- type GridFile
- func (file *GridFile) Close() (err error)
- func (file *GridFile) ContentType() string
- func (file *GridFile) GetMeta(result interface{}) (err error)
- func (file *GridFile) Id() interface{}
- func (file *GridFile) MD5() (md5 string)
- func (file *GridFile) Name() string
- func (file *GridFile) Read(b []byte) (n int, err error)
- func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error)
- func (file *GridFile) SetChunkSize(bytes int)
- func (file *GridFile) SetContentType(ctype string)
- func (file *GridFile) SetId(id interface{})
- func (file *GridFile) SetMeta(metadata interface{})
- func (file *GridFile) SetName(name string)
- func (file *GridFile) Size() (bytes int64)
- func (file *GridFile) UploadDate() time.Time
- func (file *GridFile) Write(data []byte) (n int, err error)
- type Index
- type Iter
- type LastError
- type MapReduce
- type MapReduceInfo
- type MapReduceTime
- type Pipe
- type Query
- func (q *Query) All(result interface{}) error
- func (q *Query) Apply(change Change, result interface{}) (info *ChangeInfo, err error)
- func (q *Query) Batch(n int) *Query
- func (q *Query) Count() (n int, err error)
- func (q *Query) Distinct(key string, result interface{}) error
- func (q *Query) Explain(result interface{}) error
- func (q *Query) For(result interface{}, f func() error) error
- func (q *Query) Hint(indexKey ...string) *Query
- func (q *Query) Iter() *Iter
- func (q *Query) Limit(n int) *Query
- func (q *Query) LogReplay() *Query
- func (q *Query) MapReduce(job *MapReduce, result interface{}) (info *MapReduceInfo, err error)
- func (q *Query) One(result interface{}) (err error)
- func (q *Query) Prefetch(p float64) *Query
- func (q *Query) Select(selector interface{}) *Query
- func (q *Query) Skip(n int) *Query
- func (q *Query) Snapshot() *Query
- func (q *Query) Sort(fields ...string) *Query
- func (q *Query) Tail(timeout time.Duration) *Iter
- type QueryError
- type Role
- type Safe
- type Session
- func (s *Session) BuildInfo() (info BuildInfo, err error)
- func (s *Session) Clone() *Session
- func (s *Session) Close()
- func (s *Session) Copy() *Session
- func (s *Session) DB(name string) *Database
- func (s *Session) DatabaseNames() (names []string, err error)
- func (s *Session) EnsureSafe(safe *Safe)
- func (s *Session) FindRef(ref *DBRef) *Query
- func (s *Session) Fsync(async bool) error
- func (s *Session) FsyncLock() error
- func (s *Session) FsyncUnlock() error
- func (s *Session) LiveServers() (addrs []string)
- func (s *Session) LogoutAll()
- func (s *Session) Mode() mode
- func (s *Session) New() *Session
- func (s *Session) Ping() error
- func (s *Session) Refresh()
- func (s *Session) ResetIndexCache()
- func (s *Session) Run(cmd interface{}, result interface{}) error
- func (s *Session) Safe() (safe *Safe)
- func (s *Session) SelectServers(tags ...bson.D)
- func (s *Session) SetBatch(n int)
- func (s *Session) SetMode(consistency mode, refresh bool)
- func (s *Session) SetPrefetch(p float64)
- func (s *Session) SetSafe(safe *Safe)
- func (s *Session) SetSocketTimeout(d time.Duration)
- func (s *Session) SetSyncTimeout(d time.Duration)
- type Stats
- type User
Constants ¶
const (
    Eventual  mode = 0
    Monotonic mode = 1
    Strong    mode = 2
)
Variables ¶
var ErrNotFound = errors.New("not found")
Functions ¶
func IsDup ¶
IsDup returns whether err informs of a duplicate key error because a primary key index or a secondary unique index already has an entry with the given value.
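For example, a sketch of catching a duplicate insert in a safe-mode session (the _id value shown is only illustrative):

err := collection.Insert(bson.M{"_id": 1})
if err != nil {
    return err
}
err = collection.Insert(bson.M{"_id": 1}) // same _id again
if mgo.IsDup(err) {
    fmt.Println("a document with this _id already exists")
}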
func ResetStats ¶
func ResetStats()
func SetDebug ¶
func SetDebug(debug bool)
Enable the delivery of debug messages to the logger. Only meaningful if a logger is also set.
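A sketch of a typical setup, assuming the standard library logger is an acceptable destination (any value with a compatible Output method works):

mgo.SetLogger(log.New(os.Stderr, "mgo: ", log.LstdFlags))
mgo.SetDebug(true)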
Types ¶
type BuildInfo ¶
type BuildInfo struct {
    Version       string
    VersionArray  []int  `bson:"versionArray"` // On MongoDB 2.0+; assembled from Version otherwise
    GitVersion    string `bson:"gitVersion"`
    SysInfo       string `bson:"sysInfo"`
    Bits          int
    Debug         bool
    MaxObjectSize int `bson:"maxBsonObjectSize"`
}
The BuildInfo type encapsulates details about the running MongoDB server.
Note that the VersionArray field was introduced in MongoDB 2.0+, but it is internally assembled from the Version information for previous versions. In both cases, VersionArray is guaranteed to have at least 4 entries.
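For instance, a sketch of checking the server version through an existing session:

info, err := session.BuildInfo()
if err != nil {
    return err
}
fmt.Println("server version:", info.Version)
if info.VersionArray[0] >= 2 {
    fmt.Println("running MongoDB 2.0 or later")
}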
type ChangeInfo ¶
type ChangeInfo struct {
    Updated    int         // Number of existing documents updated
    Removed    int         // Number of documents removed
    UpsertedId interface{} // Upserted _id field, when not explicitly provided
}
ChangeInfo holds details about the outcome of a change operation.
type Collection ¶
type Collection struct {
    Database *Database
    Name     string // "collection"
    FullName string // "db.collection"
}
func (*Collection) Count ¶
func (c *Collection) Count() (n int, err error)
Count returns the total number of documents in the collection.
func (*Collection) Create ¶
func (c *Collection) Create(info *CollectionInfo) error
Create explicitly creates the c collection with details of info. MongoDB creates collections automatically on use, so this method is only necessary when creating collections with non-default characteristics, such as capped collections.
Relevant documentation:
http://www.mongodb.org/display/DOCS/createCollection+Command
http://www.mongodb.org/display/DOCS/Capped+Collections
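As an illustrative sketch, creating a hypothetical capped collection limited to roughly 1MB:

info := &mgo.CollectionInfo{
    Capped:   true,
    MaxBytes: 1024 * 1024, // new documents replace old ones past ~1MB
}
err := session.DB("test").C("events").Create(info)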
func (*Collection) DropCollection ¶
func (c *Collection) DropCollection() error
DropCollection removes the entire collection including all of its documents.
func (*Collection) DropIndex ¶
func (c *Collection) DropIndex(key ...string) error
DropIndex removes the index with key from the collection.
The key value determines which fields compose the index. The index ordering will be ascending by default. To obtain an index with a descending order, the field name should be prefixed by a dash (e.g. []string{"-time"}).
For example:
err := collection.DropIndex("lastname", "firstname")
See the EnsureIndex method for more details on indexes.
func (*Collection) EnsureIndex ¶
func (c *Collection) EnsureIndex(index Index) error
EnsureIndex ensures an index with the given key exists, creating it with the provided parameters if necessary.
Once EnsureIndex returns successfully, following requests for the same index will not contact the server unless Collection.DropIndex is used to drop the same index, or Session.ResetIndexCache is called.
For example:
index := Index{
    Key:        []string{"lastname", "firstname"},
    Unique:     true,
    DropDups:   true,
    Background: true, // See notes.
    Sparse:     true,
}
err := collection.EnsureIndex(index)
The Key value determines which fields compose the index. The index ordering will be ascending by default. To obtain an index with a descending order, the field name should be prefixed by a dash (e.g. []string{"-time"}).
If Unique is true, the index must necessarily contain only a single document per Key. With DropDups set to true, documents with the same key as a previously indexed one will be dropped rather than an error returned.
If Background is true, other connections will be allowed to proceed using the collection without the index while it's being built. Note that the session executing EnsureIndex will be blocked for as long as it takes for the index to be built.
If Sparse is true, only documents containing the provided Key fields will be included in the index. When using a sparse index for sorting, only indexed documents will be returned.
If ExpireAfter is non-zero, the server will periodically scan the collection and remove documents containing an indexed time.Time field with a value older than ExpireAfter. See the documentation for details:
http://docs.mongodb.org/manual/tutorial/expire-data
Other kinds of indexes are also supported through that API. Here is an example:
index := Index{
    Key:  []string{"$2d:loc"},
    Bits: 26,
}
err := collection.EnsureIndex(index)
The example above requests the creation of a "2d" index for the "loc" field.
The 2D index bounds may be changed using the Min and Max attributes of the Index value. The default bound setting of (-180, 180) is suitable for latitude/longitude pairs.
The Bits parameter sets the precision of the 2D geohash values. If not provided, 26 bits are used, which is roughly equivalent to 1 foot of precision for the default (-180, 180) index bounds.
Relevant documentation:
http://www.mongodb.org/display/DOCS/Indexes
http://www.mongodb.org/display/DOCS/Indexing+Advice+and+FAQ
http://www.mongodb.org/display/DOCS/Indexing+as+a+Background+Operation
http://www.mongodb.org/display/DOCS/Geospatial+Indexing
http://www.mongodb.org/display/DOCS/Multikeys
func (*Collection) EnsureIndexKey ¶
func (c *Collection) EnsureIndexKey(key ...string) error
EnsureIndexKey ensures an index with the given key exists, creating it if necessary.
This example:
err := collection.EnsureIndexKey("a", "b")
Is equivalent to:
err := collection.EnsureIndex(mgo.Index{Key: []string{"a", "b"}})
See the EnsureIndex method for more details.
func (*Collection) Find ¶
func (c *Collection) Find(query interface{}) *Query
Find prepares a query using the provided document. The document may be a map or a struct value capable of being marshalled with bson. The map may be a generic one using interface{} for its key and/or values, such as bson.M, or it may be a properly typed map. Providing nil as the document is equivalent to providing an empty document such as bson.M{}.
Further details of the query may be tweaked using the resulting Query value, and then executed to retrieve results using methods such as One, For, Iter, or Tail.
In case the resulting document includes a field named $err or errmsg, which are standard ways for MongoDB to return query errors, the returned err will be set to a *QueryError value including the Err message and the Code. In those cases, the received document is still unmarshalled into the result argument so that any other custom values may be obtained if desired.
Relevant documentation:
http://www.mongodb.org/display/DOCS/Querying
http://www.mongodb.org/display/DOCS/Advanced+Queries
func (*Collection) FindId ¶
func (c *Collection) FindId(id interface{}) *Query
FindId is a convenience helper equivalent to:
query := collection.Find(bson.M{"_id": id})
See the Find method for more details.
func (*Collection) Indexes ¶
func (c *Collection) Indexes() (indexes []Index, err error)
Indexes returns a list of all indexes for the collection.
For example, this snippet would drop all available indexes:
indexes, err := collection.Indexes()
if err != nil {
    return err
}
for _, index := range indexes {
    err = collection.DropIndex(index.Key...)
    if err != nil {
        return err
    }
}
See the EnsureIndex method for more details on indexes.
func (*Collection) Insert ¶
func (c *Collection) Insert(docs ...interface{}) error
Insert inserts one or more documents into the respective collection. In case the session is in safe mode (see the SetSafe method) and an error happens while inserting the provided documents, the returned error will be of type *LastError.
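For example, inserting two documents at once (collection and field names are only illustrative):

people := session.DB("test").C("people")
err := people.Insert(
    bson.M{"name": "Ada", "age": 36},
    bson.M{"name": "Grace", "age": 40},
)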
func (*Collection) Pipe ¶
func (c *Collection) Pipe(pipeline interface{}) *Pipe
Pipe prepares a pipeline to aggregate. The pipeline document must be a slice built in terms of the aggregation framework language.
For example:
pipe := collection.Pipe([]bson.M{{"$match": bson.M{"name": "Otavio"}}})
iter := pipe.Iter()
Relevant documentation:
http://docs.mongodb.org/manual/reference/aggregation
http://docs.mongodb.org/manual/applications/aggregation
http://docs.mongodb.org/manual/tutorial/aggregation-examples
func (*Collection) Remove ¶
func (c *Collection) Remove(selector interface{}) error
Remove finds a single document matching the provided selector document and removes it from the database. If the session is in safe mode (see SetSafe) an ErrNotFound error is returned if a document isn't found, or a value of type *LastError when some other error is detected.
Relevant documentation:
http://www.mongodb.org/display/DOCS/Removing
func (*Collection) RemoveAll ¶
func (c *Collection) RemoveAll(selector interface{}) (info *ChangeInfo, err error)
RemoveAll finds all documents matching the provided selector document and removes them from the database. In case the session is in safe mode (see the SetSafe method) and an error happens when attempting the change, the returned error will be of type *LastError.
Relevant documentation:
http://www.mongodb.org/display/DOCS/Removing
func (*Collection) RemoveId ¶
func (c *Collection) RemoveId(id interface{}) error
RemoveId is a convenience helper equivalent to:
err := collection.Remove(bson.M{"_id": id})
See the Remove method for more details.
func (*Collection) Update ¶
func (c *Collection) Update(selector interface{}, change interface{}) error
Update finds a single document matching the provided selector document and modifies it according to the change document. If the session is in safe mode (see SetSafe) an ErrNotFound error is returned if a document isn't found, or a value of type *LastError when some other error is detected.
Relevant documentation:
http://www.mongodb.org/display/DOCS/Updating
http://www.mongodb.org/display/DOCS/Atomic+Operations
func (*Collection) UpdateAll ¶
func (c *Collection) UpdateAll(selector interface{}, change interface{}) (info *ChangeInfo, err error)
UpdateAll finds all documents matching the provided selector document and modifies them according to the change document. If the session is in safe mode (see SetSafe) details of the executed operation are returned in info or an error of type *LastError when some problem is detected. It is not an error for the update to not be applied on any documents because the selector doesn't match.
Relevant documentation:
http://www.mongodb.org/display/DOCS/Updating
http://www.mongodb.org/display/DOCS/Atomic+Operations
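A sketch of a bulk update, assuming hypothetical documents carrying "alive" and "age" fields:

selector := bson.M{"alive": true}
change := bson.M{"$inc": bson.M{"age": 1}}
info, err := collection.UpdateAll(selector, change)
if err != nil {
    return err
}
fmt.Println("documents updated:", info.Updated)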
func (*Collection) UpdateId ¶
func (c *Collection) UpdateId(id interface{}, change interface{}) error
UpdateId is a convenience helper equivalent to:
err := collection.Update(bson.M{"_id": id}, change)
See the Update method for more details.
func (*Collection) Upsert ¶
func (c *Collection) Upsert(selector interface{}, change interface{}) (info *ChangeInfo, err error)
Upsert finds a single document matching the provided selector document and modifies it according to the change document. If no document matching the selector is found, the change document is applied to the selector document and the result is inserted in the collection. If the session is in safe mode (see SetSafe) details of the executed operation are returned in info, or an error of type *LastError when some problem is detected.
Relevant documentation:
http://www.mongodb.org/display/DOCS/Updating
http://www.mongodb.org/display/DOCS/Atomic+Operations
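For instance, a sketch that inserts a counter document on first use and increments it afterwards (the _id and field names are illustrative):

info, err := collection.Upsert(
    bson.M{"_id": "page-views"},
    bson.M{"$inc": bson.M{"n": 1}},
)
if err != nil {
    return err
}
if info.UpsertedId != nil {
    fmt.Println("counter document created with id", info.UpsertedId)
}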
func (*Collection) UpsertId ¶
func (c *Collection) UpsertId(id interface{}, change interface{}) (info *ChangeInfo, err error)
UpsertId is a convenience helper equivalent to:
info, err := collection.Upsert(bson.M{"_id": id}, change)
See the Upsert method for more details.
func (*Collection) With ¶
func (c *Collection) With(s *Session) *Collection
With returns a copy of c that uses session s.
type CollectionInfo ¶
type CollectionInfo struct {
    // DisableIdIndex prevents the automatic creation of the index
    // on the _id field for the collection.
    DisableIdIndex bool

    // ForceIdIndex enforces the automatic creation of the index
    // on the _id field for the collection. Capped collections,
    // for example, do not have such an index by default.
    ForceIdIndex bool

    // If Capped is true new documents will replace old ones when
    // the collection is full. MaxBytes must necessarily be set
    // to define the size when the collection wraps around.
    // MaxDocs optionally defines the number of documents when it
    // wraps, but MaxBytes still needs to be set.
    Capped   bool
    MaxBytes int
    MaxDocs  int
}
The CollectionInfo type holds metadata about a collection.
Relevant documentation:
http://www.mongodb.org/display/DOCS/createCollection+Command
http://www.mongodb.org/display/DOCS/Capped+Collections
type DBRef ¶
type DBRef struct {
    Collection string      `bson:"$ref"`
    Id         interface{} `bson:"$id"`
    Database   string      `bson:"$db,omitempty"`
}
The DBRef type implements support for the database reference MongoDB convention as supported by multiple drivers. This convention enables cross-referencing documents between collections and databases using a structure which includes a collection name, a document id, and optionally a database name.
See the FindRef methods on Session and on Database.
Relevant documentation:
http://www.mongodb.org/display/DOCS/Database+References
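As a sketch, storing a reference to a hypothetical person document (personId is assumed to hold its _id) and resolving it later via the database-level FindRef:

ref := mgo.DBRef{Collection: "people", Id: personId}
err := db.C("posts").Insert(bson.M{"title": "Hello", "author": ref})
if err != nil {
    return err
}
var person struct{ Name string }
err = db.FindRef(&ref).One(&person)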
type Database ¶
func (*Database) AddUser ¶
AddUser creates or updates the authentication credentials of user within the db database.
This method is obsolete and should only be used with MongoDB 2.2 or earlier. For MongoDB 2.4 and on, use UpsertUser instead.
func (*Database) C ¶
func (db *Database) C(name string) *Collection
C returns a value representing the named collection.
Creating this value is a very lightweight operation, and involves no network communication.
func (*Database) CollectionNames ¶
CollectionNames returns the collection names present in database.
func (*Database) DropDatabase ¶
DropDatabase removes the entire database including all of its collections.
func (*Database) FindRef ¶
FindRef returns a query that looks for the document in the provided reference. If the reference includes the DB field, the document will be retrieved from the respective database.
See also the DBRef type and the FindRef method on Session.
Relevant documentation:
http://www.mongodb.org/display/DOCS/Database+References
func (*Database) GridFS ¶
GridFS returns a GridFS value representing collections in db that follow the standard GridFS specification. The provided prefix (sometimes known as root) will determine which collections to use, and is usually set to "fs" when there is a single GridFS in the database.
See the GridFS Create, Open, and OpenId methods for more details.
Relevant documentation:
http://www.mongodb.org/display/DOCS/GridFS
http://www.mongodb.org/display/DOCS/GridFS+Tools
http://www.mongodb.org/display/DOCS/GridFS+Specification
func (*Database) Login ¶
Login authenticates against MongoDB with the provided credentials. The authentication is valid for the whole session and will stay valid until Logout is explicitly called for the same database, or the session is closed.
Concurrent Login calls will work correctly.
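A minimal sketch, assuming the credentials already exist in the named database:

db := session.DB("mydb")
if err := db.Login("myuser", "mypass"); err != nil {
    return err
}
defer db.Logout()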
func (*Database) Logout ¶
func (db *Database) Logout()
Logout removes any established authentication credentials for the database.
func (*Database) RemoveUser ¶
RemoveUser removes the authentication credentials of user from the database.
func (*Database) Run ¶
Run issues the provided command against the database and unmarshals its result in the respective argument. The cmd argument may be either a string with the command name itself, in which case an empty document of the form bson.M{cmd: 1} will be used, or it may be a full command document.
Note that MongoDB considers the first marshalled key as the command name, so when providing a command with options, it's important to use an ordering-preserving document, such as a struct value or an instance of bson.D. For instance:
db.Run(bson.D{{"create", "mycollection"}, {"size", 1024}})
For privileged commands typically run against the "admin" database, see the Run method in the Session type.
Relevant documentation:
http://www.mongodb.org/display/DOCS/Commands
http://www.mongodb.org/display/DOCS/List+of+Database+Commands
func (*Database) UpsertUser ¶
UpsertUser updates the authentication credentials and the roles for a MongoDB user within the db database. If the named user doesn't exist it will be created.
This method should only be used from MongoDB 2.4 and on. For older MongoDB releases, use the obsolete AddUser method instead.
Relevant documentation:
http://docs.mongodb.org/manual/reference/user-privileges/
http://docs.mongodb.org/manual/reference/privilege-documents/
type DialInfo ¶
type DialInfo struct {
    // Addrs holds the addresses for the seed servers.
    Addrs []string

    // Direct informs whether to establish connections only with the
    // specified seed servers, or to obtain information for the whole
    // cluster and establish connections with further servers too.
    Direct bool

    // Timeout is the amount of time to wait for a server to respond when
    // first connecting and on follow up operations in the session. If
    // timeout is zero, the call may block forever waiting for a connection
    // to be established.
    Timeout time.Duration

    // Database is the database name used during the initial authentication.
    // If set, the value is also returned as the default result from the
    // Session.DB method, in place of "test".
    Database string

    // Username and Password inform the credentials for the initial
    // authentication done against Database, if that is set,
    // or the "admin" database otherwise. See the Session.Login method too.
    Username string
    Password string

    // Dial optionally specifies the dial function for creating connections.
    // At the moment addr will have type *net.TCPAddr, but other types may
    // be provided in the future, so check and fail if necessary.
    Dial func(addr net.Addr) (net.Conn, error)
}
DialInfo holds options for establishing a session with a MongoDB cluster. To use a URL, see the Dial function.
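For example, a sketch dialing with explicit options rather than a URL (the addresses and credentials shown are illustrative):

info := &mgo.DialInfo{
    Addrs:    []string{"db1.example.com:27017", "db2.example.com:27017"},
    Timeout:  10 * time.Second,
    Database: "mydb",
    Username: "myuser",
    Password: "mypass",
}
session, err := mgo.DialWithInfo(info)
if err != nil {
    return err
}
defer session.Close()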
type GridFS ¶
type GridFS struct {
    Files  *Collection
    Chunks *Collection
}
func (*GridFS) Create ¶
Create creates a new file with the provided name in the GridFS. If the file name already exists, a new version will be inserted with an up-to-date uploadDate that will cause it to be atomically visible to the Open and OpenId methods. If the file name is not important, an empty name may be provided and the file Id used instead.
It's important to Close files whether they are being written to or read from, and to check the err result to ensure the operation completed successfully.
A simple example inserting a new file:
func check(err error) {
    if err != nil {
        panic(err)
    }
}
file, err := db.GridFS("fs").Create("myfile.txt")
check(err)
n, err := file.Write([]byte("Hello world!"))
check(err)
err = file.Close()
check(err)
fmt.Printf("%d bytes written\n", n)
The io.Writer interface is implemented by *GridFile and may be used to help on the file creation. For example:
file, err := db.GridFS("fs").Create("myfile.txt")
check(err)
messages, err := os.Open("/var/log/messages")
check(err)
defer messages.Close()
_, err = io.Copy(file, messages)
check(err)
err = file.Close()
check(err)
func (*GridFS) Find ¶
Find runs query on GridFS's files collection and returns the resulting Query.
This logic:
gfs := db.GridFS("fs") iter := gfs.Find(nil).Iter()
Is equivalent to:
files := db.C("fs" + ".files") iter := files.Find(nil).Iter()
func (*GridFS) Open ¶
Open returns the most recently uploaded file with the provided name, for reading. If the file isn't found, err will be set to mgo.ErrNotFound.
It's important to Close files whether they are being written to or read from, and to check the err result to ensure the operation completed successfully.
The following example will print the first 8192 bytes from the file:
file, err := db.GridFS("fs").Open("myfile.txt")
check(err)
b := make([]byte, 8192)
n, err := file.Read(b)
check(err)
fmt.Println(string(b[:n]))
err = file.Close()
check(err)
fmt.Printf("%d bytes read\n", n)
The io.Reader interface is implemented by *GridFile and may be used to deal with it. As an example, the following snippet will dump the whole file into the standard output:
file, err := db.GridFS("fs").Open("myfile.txt")
check(err)
_, err = io.Copy(os.Stdout, file)
check(err)
err = file.Close()
check(err)
func (*GridFS) OpenId ¶
OpenId returns the file with the provided id, for reading. If the file isn't found, err will be set to mgo.ErrNotFound.
It's important to Close files whether they are being written to or read from, and to check the err result to ensure the operation completed successfully.
The following example will print the first 8192 bytes from the file:
func check(err error) {
    if err != nil {
        panic(err)
    }
}
file, err := db.GridFS("fs").OpenId(objid)
check(err)
b := make([]byte, 8192)
n, err := file.Read(b)
check(err)
fmt.Println(string(b[:n]))
err = file.Close()
check(err)
fmt.Printf("%d bytes read\n", n)
The io.Reader interface is implemented by *GridFile and may be used to deal with it. As an example, the following snippet will dump the whole file into the standard output:
file, err := db.GridFS("fs").OpenId(objid)
check(err)
_, err = io.Copy(os.Stdout, file)
check(err)
err = file.Close()
check(err)
func (*GridFS) OpenNext ¶
OpenNext opens the next file from iter for reading, sets *file to it, and returns true on the success case. If no more documents are available on iter or an error occurred, *file is set to nil and the result is false. Errors will be available via iter.Err().
The iter parameter must be an iterator on the GridFS files collection. Using the GridFS.Find method is an easy way to obtain such an iterator, but any iterator on the collection will work.
If the provided *file is non-nil, OpenNext will close it before attempting to iterate to the next element. This means that in a loop one only has to worry about closing files when breaking out of the loop early (break, return, or panic).
For example:
gfs := db.GridFS("fs") query := gfs.Find(nil).Sort("filename") iter := query.Iter() var f *mgo.GridFile for gfs.OpenNext(iter, &f) { fmt.Printf("Filename: %s\n", f.Name()) } if iter.Close() != nil { panic(iter.Close()) }
type GridFile ¶
type GridFile struct {
// contains filtered or unexported fields
}
func (*GridFile) Close ¶
Close flushes any pending changes in case the file is being written to, waits for any background operations to finish, and closes the file.
It's important to Close files whether they are being written to or read from, and to check the err result to ensure the operation completed successfully.
func (*GridFile) ContentType ¶
ContentType returns the optional file content type. An empty string will be returned in case it is unset.
func (*GridFile) GetMeta ¶
GetMeta unmarshals the optional "metadata" field associated with the file into the result parameter. The meaning of keys under that field is user-defined. For example:
result := struct{ INode int }{}
err = file.GetMeta(&result)
if err != nil {
    panic(err)
}
fmt.Printf("inode: %d\n", result.INode)
func (*GridFile) Name ¶
Name returns the optional file name. An empty string will be returned in case it is unset.
func (*GridFile) Read ¶
Read reads into b the next available data from the file and returns the number of bytes read and an error in case something wrong happened. At the end of the file, n will be zero and err will be set to io.EOF.
The parameters and behavior of this function turn the file into an io.Reader.
func (*GridFile) Seek ¶
Seek sets the offset for the next Read or Write on file to offset, interpreted according to whence: 0 means relative to the origin of the file, 1 means relative to the current offset, and 2 means relative to the end. It returns the new offset and an error, if any.
func (*GridFile) SetChunkSize ¶
SetChunkSize sets the size of saved chunks. Once the file is written to, it will be split into blocks of that size and each block saved into an independent chunk document. The default chunk size is 256KB.
It is a runtime error to call this function once the file has started being written to.
func (*GridFile) SetContentType ¶
SetContentType changes the optional file content type. An empty string may be used to unset it.
It is a runtime error to call this function when the file is not open for writing.
func (*GridFile) SetId ¶
func (file *GridFile) SetId(id interface{})
SetId changes the current file Id.
It is a runtime error to call this function once the file has started being written to, or when the file is not open for writing.
func (*GridFile) SetMeta ¶
func (file *GridFile) SetMeta(metadata interface{})
SetMeta changes the optional "metadata" field associated with the file. The meaning of keys under that field is user-defined. For example:
file.SetMeta(bson.M{"inode": inode})
It is a runtime error to call this function when the file is not open for writing.
func (*GridFile) SetName ¶
SetName changes the optional file name. An empty string may be used to unset it.
It is a runtime error to call this function when the file is not open for writing.
func (*GridFile) UploadDate ¶
UploadDate returns the file upload time.
func (*GridFile) Write ¶
Write writes the provided data to the file and returns the number of bytes written and an error in case something wrong happened.
The file will internally cache the data so that all but the last chunk sent to the database have the size defined by SetChunkSize. This also means that errors may be deferred until a future call to Write or Close.
The parameters and behavior of this function turn the file into an io.Writer.
type Index ¶
type Index struct {
    Key        []string // Index key fields; prefix name with dash (-) for descending order
    Unique     bool     // Prevent two documents from having the same index key
    DropDups   bool     // Drop documents with the same index key as a previously indexed one
    Background bool     // Build index in background and return immediately
    Sparse     bool     // Only index documents containing the Key fields

    ExpireAfter time.Duration // Periodically delete docs with indexed time.Time older than that.

    Name string // Index name, computed by EnsureIndex

    Bits, Min, Max int // Properties for spatial indexes
}
type Iter ¶
type Iter struct {
// contains filtered or unexported fields
}
func (*Iter) All ¶
All retrieves all documents from the result set into the provided slice and closes the iterator.
The result argument must necessarily be the address for a slice. The slice may be nil or previously allocated.
WARNING: Obviously, All must not be used with result sets that may be potentially large, since it may consume all memory until the system crashes. Consider building the query with a Limit clause to ensure the result size is bounded.
For instance:
var result []struct{ Value int }
iter := collection.Find(nil).Limit(100).Iter()
err := iter.All(&result)
if err != nil {
    return err
}
func (*Iter) Close ¶
Close kills the server cursor used by the iterator, if any, and returns nil if no errors happened during iteration, or the actual error otherwise.
Server cursors are automatically closed at the end of an iteration, which means close will do nothing unless the iteration was interrupted before the server finished sending results to the driver. If Close is not called in such a situation, the cursor will remain available at the server until the default cursor timeout period is reached. No further problems arise.
Close is idempotent. That means it can be called repeatedly and will return the same result every time.
In case a resulting document included a field named $err or errmsg, which are standard ways for MongoDB to report an improper query, the returned value has a *QueryError type.
func (*Iter) Err ¶
Err returns nil if no errors happened during iteration, or the actual error otherwise.
In case a resulting document included a field named $err or errmsg, which are standard ways for MongoDB to report an improper query, the returned value has a *QueryError type, and includes the Err message and the Code.
func (*Iter) For ¶
The For method is obsolete and will be removed in a future release. See Iter as an elegant replacement.
func (*Iter) Next ¶
Next retrieves the next document from the result set, blocking if necessary. This method will also automatically retrieve another batch of documents from the server when the current one is exhausted, or before that in background if pre-fetching is enabled (see the Query.Prefetch and Session.SetPrefetch methods).
Next returns true if a document was successfully unmarshalled onto result, and false at the end of the result set or if an error happened. When Next returns false, the Err method should be called to verify if there was an error during iteration.
For example:
iter := collection.Find(nil).Iter()
for iter.Next(&result) {
    fmt.Printf("Result: %v\n", result.Id)
}
if err := iter.Close(); err != nil {
    return err
}
type LastError ¶
type MapReduce ¶
type MapReduce struct {
    Map      string      // Map Javascript function code (required)
    Reduce   string      // Reduce Javascript function code (required)
    Finalize string      // Finalize Javascript function code (optional)
    Out      interface{} // Output collection name or document. If nil, results are inlined into the result parameter.
    Scope    interface{} // Optional global scope for Javascript functions
    Verbose  bool
}
type MapReduceInfo ¶
type MapReduceInfo struct {
    InputCount  int            // Number of documents mapped
    EmitCount   int            // Number of times reduce called emit
    OutputCount int            // Number of documents in resulting collection
    Database    string         // Output database, if results are not inlined
    Collection  string         // Output collection, if results are not inlined
    Time        int64          // Time to run the job, in nanoseconds
    VerboseTime *MapReduceTime // Only defined if Verbose was true
}
type MapReduceTime ¶
type Pipe ¶
type Pipe struct {
// contains filtered or unexported fields
}
type Query ¶
type Query struct {
// contains filtered or unexported fields
}
func (*Query) Apply ¶
func (q *Query) Apply(change Change, result interface{}) (info *ChangeInfo, err error)
Apply allows updating, upserting or removing a document matching a query and atomically returning either the old version (the default) or the new version of the document (when ReturnNew is true). If no objects are found Apply returns ErrNotFound.
The Sort and Select query methods affect the result of Apply. In case multiple documents match the query, Sort enables selecting which document to act upon by ordering it first. Select enables retrieving only a selection of fields of the new or old document.
This simple example increments a counter and prints its new value:
change := mgo.Change{
    Update:    bson.M{"$inc": bson.M{"n": 1}},
    ReturnNew: true,
}
info, err = col.Find(bson.M{"_id": id}).Apply(change, &doc)
fmt.Println(doc.N)
Relevant documentation:
http://www.mongodb.org/display/DOCS/findAndModify+Command
http://www.mongodb.org/display/DOCS/Updating
http://www.mongodb.org/display/DOCS/Atomic+Operations
func (*Query) Batch ¶
Batch sets the batch size used when fetching documents from the database. It's possible to change this setting on a per-session basis as well, using the SetBatch method of Session.
The default batch size is defined by the database itself. As of this writing, MongoDB will use an initial size of min(100 docs, 4MB) on the first batch, and 4MB on remaining ones.
func (*Query) Distinct ¶
Distinct returns a list of distinct values for the given key within the result set. The list of distinct values will be unmarshalled in the "values" key of the provided result parameter.
For example:
var result []int
err := collection.Find(bson.M{"gender": "F"}).Distinct("age", &result)
Relevant documentation:
http://www.mongodb.org/display/DOCS/Aggregation
func (*Query) Explain ¶
Explain returns a number of details about how the MongoDB server would execute the requested query, such as the number of objects examined, the number of times the read lock was yielded to allow writes to go in, and so on.
For example:
m := bson.M{}
err := collection.Find(bson.M{"filename": name}).Explain(m)
if err == nil {
    fmt.Printf("Explain: %#v\n", m)
}
Relevant documentation:
http://www.mongodb.org/display/DOCS/Optimization
http://www.mongodb.org/display/DOCS/Query+Optimizer
func (*Query) For ¶
The For method is obsolete and will be removed in a future release. See Iter as an elegant replacement.
func (*Query) Hint ¶
Hint will include an explicit "hint" in the query to force the server to use a specified index, potentially improving performance in some situations. The provided parameters are the fields that compose the key of the index to be used. For details on how the indexKey may be built, see the EnsureIndex method.
For example:
query := collection.Find(bson.M{"firstname": "Joe", "lastname": "Winter"}) query.Hint("lastname", "firstname")
Relevant documentation:
http://www.mongodb.org/display/DOCS/Optimization
http://www.mongodb.org/display/DOCS/Query+Optimizer
func (*Query) Iter ¶
Iter executes the query and returns an iterator capable of going over all the results. Results will be returned in batches of configurable size (see the Batch method) and more documents will be requested when a configurable number of documents is iterated over (see the Prefetch method).
func (*Query) Limit ¶
Limit restricts the maximum number of documents retrieved to n, and also changes the batch size to the same value. Once n documents have been returned by Next, the following call will return ErrNotFound.
func (*Query) LogReplay ¶
LogReplay enables an option that optimizes queries that are typically made against the MongoDB oplog for replaying it. This is an internal implementation aspect and most likely uninteresting for other uses. It has seen at least one use case, though, so it's exposed via the API.
func (*Query) MapReduce ¶
func (q *Query) MapReduce(job *MapReduce, result interface{}) (info *MapReduceInfo, err error)
MapReduce executes a map/reduce job for documents covered by the query. That kind of job is suitable for very flexible bulk aggregation of data performed at the server side via Javascript functions.
Results from the job may be returned as a result of the query itself through the result parameter in case they'll certainly fit in memory and in a single document. If there's the possibility that the amount of data might be too large, results must be stored back in an alternative collection or even a separate database, by setting the Out field of the provided MapReduce job. In that case, provide nil as the result parameter.
These are some of the ways to set Out:
nil
    Inline results into the result parameter.

bson.M{"replace": "mycollection"}
    The output will be inserted into a collection which replaces any
    existing collection with the same name.

bson.M{"merge": "mycollection"}
    This option will merge new data into the old output collection. In
    other words, if the same key exists in both the result set and the
    old collection, the new key will overwrite the old one.

bson.M{"reduce": "mycollection"}
    If documents exist for a given key in the result set and in the old
    collection, then a reduce operation (using the specified reduce
    function) will be performed on the two values and the result will be
    written to the output collection. If a finalize function was
    provided, this will be run after the reduce as well.

bson.M{..., "db": "mydb"}
    Any of the above options can have the "db" key included for doing
    the respective action in a separate database.
The following is a trivial example which will count the number of occurrences of a field named n on each document in a collection, and will return results inline:
job := &mgo.MapReduce{ Map: "function() { emit(this.n, 1) }", Reduce: "function(key, values) { return Array.sum(values) }", } var result []struct { Id int "_id"; Value int } _, err := collection.Find(nil).MapReduce(job, &result) if err != nil { return err } for _, item := range result { fmt.Println(item.Value) }
This function is compatible with MongoDB 1.7.4+.
Relevant documentation:
http://www.mongodb.org/display/DOCS/MapReduce
func (*Query) One ¶
One executes the query and unmarshals the first obtained document into the result argument. The result must be a struct or map value capable of being unmarshalled into by gobson. This function blocks until either a result is available or an error happens. For example:
err := collection.Find(bson.M{"a", 1}).One(&result)
In case the resulting document includes a field named $err or errmsg, which are standard ways for MongoDB to return query errors, the returned err will be set to a *QueryError value including the Err message and the Code. In those cases, the received document is still unmarshalled into the result argument so that any other custom values may be obtained if desired.
func (*Query) Prefetch ¶
Prefetch sets the point at which the next batch of results will be requested. When there are p*batch_size remaining documents cached in an Iter, the next batch will be requested in background. For instance, when using this:
query.Batch(200).Prefetch(0.25)
and there are only 50 documents cached in the Iter to be processed, the next batch of 200 will be requested. It's possible to change this setting on a per-session basis as well, using the SetPrefetch method of Session.
The default prefetch value is 0.25.
func (*Query) Select ¶
Select enables selecting which fields should be retrieved for the results found. For example, the following query would only retrieve the name field:
err := collection.Find(nil).Select(bson.M{"name": 1}).One(&result)
Relevant documentation:
http://www.mongodb.org/display/DOCS/Retrieving+a+Subset+of+Fields
func (*Query) Skip ¶
Skip skips over the n initial documents from the query results. Note that this only makes sense with capped collections where documents are naturally ordered by insertion time, or with sorted results.
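For example, a simple pagination sketch over sorted results (the page size and sort field are only illustrative):

page, perPage := 3, 20
var results []bson.M
err := collection.Find(nil).Sort("name").Skip(page * perPage).Limit(perPage).All(&results)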
func (*Query) Snapshot ¶
Snapshot will force the performed query to make use of an available index on the _id field to prevent the same document from being returned more than once in a single iteration. This might happen without this setting in situations when the document changes in size and thus has to be moved while the iteration is running.
Because snapshot mode traverses the _id index, it may not be used with sorting or explicit hints. It also cannot use any other index for the query.
Even with snapshot mode, items inserted or deleted during the query may or may not be returned; that is, this mode is not a true point-in-time snapshot.
The same effect of Snapshot may be obtained by using any unique index on field(s) that will not be modified (best to use Hint explicitly too). A non-unique index (such as creation time) may be made unique by appending _id to the index when creating it.
Relevant documentation:
http://www.mongodb.org/display/DOCS/How+to+do+Snapshotted+Queries+in+the+Mongo+Database
func (*Query) Sort ¶
Sort asks the database to order returned documents according to the provided field names. A field name may be prefixed by - (minus) for it to be sorted in reverse order.
For example:
query1 := collection.Find(nil).Sort("firstname", "lastname")
query2 := collection.Find(nil).Sort("-age")
query3 := collection.Find(nil).Sort("$natural")
Relevant documentation:
http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order
func (*Query) Tail ¶
Tail returns a tailable iterator. Unlike a normal iterator, a tailable iterator may wait for new values to be inserted in the collection once the end of the current result set is reached. A tailable iterator may only be used with capped collections.
The timeout parameter indicates how long Next will block waiting for a result before timing out. If set to -1, Next will not timeout, and will continue waiting for a result for as long as the cursor is valid and the session is not closed. If set to 0, Next times out as soon as it reaches the end of the result set. Otherwise, Next will wait for at least the given duration for a new document to be available before timing out.
On timeouts, Next will unblock and return false, and the Timeout method will return true if called. In these cases, Next may still be called again on the same iterator to check if a new value is available at the current cursor position, and again it will block according to the specified timeout. If the cursor becomes invalid, though, both Next and Timeout will return false and the query must be restarted.
The following example demonstrates timeout handling and query restarting:
iter := collection.Find(nil).Sort("$natural").Tail(5 * time.Second)
for {
    for iter.Next(&result) {
        fmt.Println(result.Id)
        lastId = result.Id
    }
    if err := iter.Close(); err != nil {
        return err
    }
    if iter.Timeout() {
        continue
    }
    query := collection.Find(bson.M{"_id": bson.M{"$gt": lastId}})
    iter = query.Sort("$natural").Tail(5 * time.Second)
}
Relevant documentation:
http://www.mongodb.org/display/DOCS/Tailable+Cursors
http://www.mongodb.org/display/DOCS/Capped+Collections
http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order
type QueryError ¶
func (*QueryError) Error ¶
func (err *QueryError) Error() string
type Role ¶
type Role string
const (
    // Relevant documentation:
    //
    //     http://docs.mongodb.org/manual/reference/user-privileges/
    //
    RoleRead         Role = "read"
    RoleReadAny      Role = "readAnyDatabase"
    RoleReadWrite    Role = "readWrite"
    RoleReadWriteAny Role = "readWriteAnyDatabase"
    RoleDBAdmin      Role = "dbAdmin"
    RoleDBAdminAny   Role = "dbAdminAnyDatabase"
    RoleUserAdmin    Role = "userAdmin"
    RoleUserAdminAny Role = "userAdminAnyDatabase"
    RoleClusterAdmin Role = "clusterAdmin"
)
type Safe ¶
type Safe struct {
    W        int    // Min # of servers to ack before success
    WMode    string // Write mode for MongoDB 2.0+ (e.g. "majority")
    WTimeout int    // Milliseconds to wait for W before timing out
    FSync    bool   // Should servers sync to disk before returning success
    J        bool   // Wait for next group commit if journaling; no effect otherwise
}
See SetSafe for details on the Safe type.
type Session ¶
type Session struct {
// contains filtered or unexported fields
}
func Dial ¶
Dial establishes a new session to the cluster identified by the given seed server(s). The session will enable communication with all of the servers in the cluster, so the seed servers are used only to find out about the cluster topology.
Dial will timeout after 10 seconds if a server isn't reached. The returned session will timeout operations after one minute by default if servers aren't available. To customize the timeout, see DialWithTimeout, SetSyncTimeout, and SetSocketTimeout.
This method is generally called just once for a given cluster. Further sessions to the same cluster are then established using the New or Copy methods on the obtained session. This will make them share the underlying cluster, and manage the pool of connections appropriately.
Once the session is not useful anymore, Close must be called to release the resources appropriately.
The seed servers must be provided in the following format:
[mongodb://][user:pass@]host1[:port1][,host2[:port2],...][/database][?options]
For example, it may be as simple as:
localhost
Or more involved like:
mongodb://myuser:mypass@localhost:40001,otherhost:40001/mydb
If the port number is not provided for a server, it defaults to 27017.
The username and password provided in the URL will be used to authenticate into the database named after the slash at the end of the host names, or into the "admin" database if none is provided. The authentication information will persist in sessions obtained through the New method as well.
The following connection options are supported after the question mark:
connect=direct

    This option will disable the automatic replica set server discovery
    logic, and will only use the servers provided. This enables forcing
    the communication with a specific server or set of servers (even if
    they are slaves). Note that to talk to a slave you'll need to relax
    the consistency requirements using a Monotonic or Eventual mode via
    SetMode.
Relevant documentation:
http://www.mongodb.org/display/DOCS/Connections
func DialWithInfo ¶
DialWithInfo establishes a new session to the cluster identified by info.
func DialWithTimeout ¶
DialWithTimeout works like Dial, but uses timeout as the amount of time to wait for a server to respond when first connecting and also on follow up operations in the session. If timeout is zero, the call may block forever waiting for a connection to be made.
See SetSyncTimeout for customizing the timeout for the session.
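For example, a sketch that gives up after five seconds (the address is illustrative):

session, err := mgo.DialWithTimeout("localhost:27017", 5*time.Second)
if err != nil {
    return err
}
defer session.Close()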
func (*Session) BuildInfo ¶
BuildInfo retrieves the version and other details about the running MongoDB server.
func (*Session) Clone ¶
Clone works just like Copy, but also reuses the same socket as the original session, in case it had already reserved one due to its consistency guarantees. This behavior ensures that writes performed in the old session are necessarily observed when using the new session, as long as it was a strong or monotonic session. That said, it also means that long operations may cause other goroutines using the original session to wait.
func (*Session) Close ¶
func (s *Session) Close()
Close terminates the session. It's a runtime error to use a session after it has been closed.
func (*Session) Copy ¶
Copy works just like New, but preserves the exact authentication information from the original session.
func (*Session) DB ¶
DB returns a value representing the named database. If name is empty, the database name provided in the dialed URL is used instead. If that is also empty, "test" is used as a fallback in a way equivalent to the mongo shell.
Creating this value is a very lightweight operation, and involves no network communication.
func (*Session) DatabaseNames ¶
DatabaseNames returns the names of non-empty databases present in the cluster.
func (*Session) EnsureSafe ¶
EnsureSafe compares the provided safety parameters with the ones currently in use by the session and picks the most conservative choice for each setting.
That is:
- safe.WMode is always used if set.
- safe.W is used if larger than the current W and WMode is empty.
- safe.FSync is always used if true.
- safe.J is used if FSync is false.
- safe.WTimeout is used if set and smaller than the current WTimeout.
For example, the following statement will ensure the session is at least checking for errors, without enforcing further constraints. If a more conservative SetSafe or EnsureSafe call was previously done, the following call will be ignored.
session.EnsureSafe(&mgo.Safe{})
See also the SetSafe method for details on what each option means.
Relevant documentation:
http://www.mongodb.org/display/DOCS/getLastError+Command
http://www.mongodb.org/display/DOCS/Verifying+Propagation+of+Writes+with+getLastError
http://www.mongodb.org/display/DOCS/Data+Center+Awareness
func (*Session) FindRef ¶
FindRef returns a query that looks for the document in the provided reference. For a DBRef to be resolved correctly at the session level it must necessarily have the optional DB field defined.
See also the DBRef type and the FindRef method on Database.
Relevant documentation:
http://www.mongodb.org/display/DOCS/Database+References
func (*Session) Fsync ¶
Fsync flushes in-memory writes to disk on the server the session is established with. If async is true, the call returns immediately, otherwise it returns after the flush has been made.
func (*Session) FsyncLock ¶
FsyncLock locks all writes in the specific server the session is established with and returns. Any writes attempted to the server after it is successfully locked will block until FsyncUnlock is called for the same server.
This method works on slaves as well, preventing the oplog from being flushed while the server is locked, but since only the server connected to is locked, for locking specific slaves it may be necessary to establish a connection directly to the slave (see Dial's connect=direct option).
As an important caveat, note that once a write is attempted and blocks, follow up reads will block as well due to the way the lock is internally implemented in the server. More details at:
https://jira.mongodb.org/browse/SERVER-4243
FsyncLock is often used for performing consistent backups of the database files on disk.
Relevant documentation:
http://www.mongodb.org/display/DOCS/fsync+Command
http://www.mongodb.org/display/DOCS/Backups
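A rough sketch of that backup pattern; the copy step is a placeholder for whatever backup tooling is in use:

if err := session.FsyncLock(); err != nil {
    return err
}
defer session.FsyncUnlock()

// Copy the database files on disk here; writes remain blocked
// until FsyncUnlock is called.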
func (*Session) FsyncUnlock ¶
FsyncUnlock releases the server for writes. See FsyncLock for details.
func (*Session) LiveServers ¶
LiveServers returns a list of server addresses which are currently known to be alive.
func (*Session) LogoutAll ¶
func (s *Session) LogoutAll()
LogoutAll removes all established authentication credentials for the session.
func (*Session) Mode ¶
func (s *Session) Mode() mode
Mode returns the current consistency mode for the session.
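For instance, the returned value may be compared against the package constants (Eventual, Monotonic, and Strong):

if session.Mode() == mgo.Monotonic {
    // the session is currently in the Monotonic consistency mode
}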
func (*Session) New ¶
New creates a new session with the same parameters as the original session, including consistency, batch size, prefetching, safety mode, etc. The returned session will use sockets from the pool, so there's a chance that writes just performed in another session may not yet be visible.
Login information from the original session will not be copied over into the new session unless it was provided through the initial URL for the Dial function.
See the Copy and Clone methods.
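As a sketch of a common pattern, a spawned session is closed once its work is done; the function and the database and collection names below are hypothetical:

func countPeople(root *mgo.Session) (int, error) {
    session := root.Copy()
    defer session.Close()
    // Queries here use a connection independent from the one held by root.
    return session.DB("mydb").C("people").Count()
}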
func (*Session) Refresh ¶
func (s *Session) Refresh()
Refresh puts back any reserved sockets in use and restarts the consistency guarantees according to the current consistency setting for the session.
func (*Session) ResetIndexCache ¶
func (s *Session) ResetIndexCache()
ResetIndexCache() clears the cache of previously ensured indexes. Following requests to EnsureIndex will contact the server.
func (*Session) Run ¶
Run issues the provided command against the "admin" database and unmarshals its result in the respective argument. The cmd argument may be either a string with the command name itself, in which case an empty document of the form bson.M{cmd: 1} will be used, or it may be a full command document.
Note that MongoDB considers the first marshalled key as the command name, so when providing a command with options, it's important to use an ordering-preserving document, such as a struct value or an instance of bson.D. For instance:
db.Run(bson.D{{"create", "mycollection"}, {"size", 1024}})
For commands against arbitrary databases, see the Run method in the Database type.
Relevant documentation:
http://www.mongodb.org/display/DOCS/Commands
http://www.mongodb.org/display/DOCS/List+of+Database+Commands
func (*Session) SelectServers ¶
SelectServers restricts communication to servers configured with the given tags. For example, the following statement restricts servers used for reading operations to those with both tag "disk" set to "ssd" and tag "rack" set to 1:
session.SelectServers(bson.D{{"disk", "ssd"}, {"rack", 1}})
Multiple sets of tags may be provided, in which case the used server must match all tags within any one set.
If a connection was previously assigned to the session due to the current session mode (see Session.SetMode), the tag selection will only be enforced after the session is refreshed.
Relevant documentation:
http://docs.mongodb.org/manual/tutorial/configure-replica-set-tag-sets
func (*Session) SetBatch ¶
SetBatch sets the default batch size used when fetching documents from the database. It's possible to change this setting on a per-query basis as well, using the Query.Batch method.
The default batch size is defined by the database itself. As of this writing, MongoDB will use an initial size of min(100 docs, 4MB) on the first batch, and 4MB on remaining ones.
func (*Session) SetMode ¶
SetMode changes the consistency mode for the session.
In the Strong consistency mode reads and writes will always be made to the master server using a unique connection so that reads and writes are fully consistent, ordered, and observing the most up-to-date data. This offers the least benefits in terms of distributing load, but the most guarantees. See also Monotonic and Eventual.
In the Monotonic consistency mode reads may not be entirely up-to-date, but they will always see the history of changes moving forward, the data read will be consistent across sequential queries in the same session, and modifications made within the session will be observed in following queries (read-your-writes).
In practice, the Monotonic mode is obtained by performing initial reads against a unique connection to an arbitrary slave, if one is available, and once the first write happens, the session connection is switched over to the master server. This manages to distribute some of the reading load with slaves, while maintaining some useful guarantees.
In the Eventual consistency mode reads will be made to any slave in the cluster, if one is available, and sequential reads will not necessarily be made with the same connection. This means that data may be observed out of order. Writes will of course be issued to the master, but independent writes in the same Eventual session may also be made with independent connections, so there are also no guarantees in terms of write ordering (no read-your-writes guarantees either).
The Eventual mode is the fastest and most resource-friendly, but is also the one offering the least guarantees about ordering of the data read and written.
If refresh is true, in addition to ensuring the session is in the given consistency mode, the consistency guarantees will also be reset (e.g. a Monotonic session will be allowed to read from slaves again). This is equivalent to calling the Refresh function.
Shifting between Monotonic and Strong modes will keep a previously reserved connection for the session unless refresh is true or the connection is unsuitable (to a slave server in a Strong session).
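For instance, the following call switches the session into the Monotonic mode and, since refresh is true, resets its consistency guarantees at the same time:

session.SetMode(mgo.Monotonic, true)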
func (*Session) SetPrefetch ¶
SetPrefetch sets the default point at which the next batch of results will be requested. When there are p*batch_size remaining documents cached in an Iter, the next batch will be requested in the background. For instance, when using this:
session.SetBatch(200)
session.SetPrefetch(0.25)
and there are only 50 documents cached in the Iter to be processed, the next batch of 200 will be requested. It's possible to change this setting on a per-query basis as well, using the Prefetch method of Query.
The default prefetch value is 0.25.
func (*Session) SetSafe ¶
SetSafe changes the session safety mode.
If the safe parameter is nil, the session is put in unsafe mode, and writes become fire-and-forget, without error checking. The unsafe mode is faster since operations won't wait for a confirmation.
If the safe parameter is not nil, any changing query (insert, update, ...) will be followed by a getLastError command with the specified parameters, to ensure the request was correctly processed.
The safe.W parameter determines how many servers should confirm a write before the operation is considered successful. If set to 0 or 1, the command will return as soon as the master is done with the request. If safe.WTimeout is greater than zero, it determines how many milliseconds to wait for the safe.W servers to respond before returning an error.
Starting with MongoDB 2.0.0 the safe.WMode parameter can be used instead of W to request for richer semantics. If set to "majority" the server will wait for a majority of members from the replica set to respond before returning. Custom modes may also be defined within the server to create very detailed placement schemas. See the data awareness documentation in the links below for more details (note that MongoDB internally reuses the "w" field name for WMode).
If safe.FSync is true and journaling is disabled, the servers will be forced to sync all files to disk immediately before returning. If the same option is true but journaling is enabled, the server will instead wait for the next group commit before returning.
Since MongoDB 2.0.0, the safe.J option can also be used instead of FSync to force the server to wait for a group commit in case journaling is enabled. The option has no effect if the server has journaling disabled.
For example, the following statement will make the session check for errors, without imposing further constraints:
session.SetSafe(&mgo.Safe{})
The following statement will force the server to wait for a majority of members of a replica set to return (MongoDB 2.0+ only):
session.SetSafe(&mgo.Safe{WMode: "majority"})
The following statement ensures that at least two servers have flushed the change to disk before confirming the success of operations:
session.EnsureSafe(&mgo.Safe{W: 2, FSync: true})
The following statement, on the other hand, disables the verification of errors entirely:
session.SetSafe(nil)
See also the EnsureSafe method.
Relevant documentation:
http://www.mongodb.org/display/DOCS/getLastError+Command
http://www.mongodb.org/display/DOCS/Verifying+Propagation+of+Writes+with+getLastError
http://www.mongodb.org/display/DOCS/Data+Center+Awareness
func (*Session) SetSocketTimeout ¶
SetSocketTimeout sets the amount of time to wait for a non-responding socket to the database before it is forcefully closed.
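For instance, assuming the time package is imported, a one minute limit (an arbitrary value) may be set with:

session.SetSocketTimeout(1 * time.Minute)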
func (*Session) SetSyncTimeout ¶
SetSyncTimeout sets the amount of time an operation with this session will wait before returning an error in case a connection to a usable server can't be established. Set it to zero to wait forever. The default value is 7 seconds.
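For instance, the following call raises the limit to ten seconds (an arbitrary value), again assuming the time package is imported:

session.SetSyncTimeout(10 * time.Second)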
type Stats ¶
type User ¶
type User struct {
    // Username is how the user identifies itself to the system.
    Username string `bson:"user"`

    // Password is the plaintext password for the user. If set,
    // the UpsertUser method will hash it into PasswordHash and
    // unset it before the user is added to the database.
    Password string `bson:",omitempty"`

    // PasswordHash is the MD5 hash of Username+":mongo:"+Password.
    PasswordHash string `bson:"pwd,omitempty"`

    // UserSource indicates where to look for this user's credentials.
    // It may be set to a database name, or to "$external" for
    // consulting an external resource such as Kerberos. UserSource
    // must not be set if Password or PasswordHash are present.
    UserSource string `bson:"userSource,omitempty"`

    // Roles indicates the set of roles the user will be provided.
    // See the Role constants.
    Roles []Role `bson:"roles"`

    // OtherDBRoles allows assigning roles in other databases from
    // user documents inserted in the admin database. This field
    // only works in the admin database.
    OtherDBRoles map[string][]Role `bson:"otherDBRoles,omitempty"`
}
User represents a MongoDB user.
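As a sketch (the user name, password, and role choice below are arbitrary), a user may be created or updated through the UpsertUser method on Database:

user := &mgo.User{
    Username: "myuser",
    Password: "mypassword",
    Roles:    []mgo.Role{mgo.RoleReadWrite},
}
err := session.DB("mydb").UpsertUser(user)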
Relevant documentation:
http://docs.mongodb.org/manual/reference/privilege-documents/
http://docs.mongodb.org/manual/reference/user-privileges/