-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/

-- | Driver (client) for MongoDB, a free, scalable, fast, document DBMS
--
--   This package lets you connect to MongoDB servers and update/query
--   their data. Please see the example in Database.MongoDB and the
--   tutorial from the homepage. For information about MongoDB itself, see
--   www.mongodb.org.
@package mongoDB
@version 2.3.0.5


-- | This module defines a connection interface. It could be a regular
--   network connection, TLS connection, a mock or anything else.
module Database.MongoDB.Transport

-- | Abstract transport interface
--
--   read should return null on EOF
data Transport
Transport :: (Int -> IO ByteString) -> (ByteString -> IO ()) -> IO () -> IO () -> Transport
[read] :: Transport -> Int -> IO ByteString
[write] :: Transport -> ByteString -> IO ()
[flush] :: Transport -> IO ()
[close] :: Transport -> IO ()

-- | Make connection from handle
fromHandle :: Handle -> IO Transport


-- | Query and update documents
module Database.MongoDB.Query

-- | A monad on top of m (which must be a MonadIO) that may access the
--   database and may fail with a DB Failure
type Action = ReaderT MongoContext

-- | Run action against database on server at other end of pipe. Use
--   access mode for any reads and writes. Throw Failure in case of any
--   error.
access :: (MonadIO m) => Pipe -> AccessMode -> Database -> Action m a -> m a

-- | A connection failure, or a read or write exception like cursor
--   expired or inserting a duplicate key. Note, unexpected data from the
--   server is not a Failure, rather it is a programming error (you should
--   call error in this case) because the client and server are
--   incompatible and a programming change is required.
data Failure

-- | TCP connection (Pipeline) failed. May work if you try again on the
--   same Mongo Connection which will create a new Pipe.
ConnectionFailure :: IOError -> Failure

-- | Cursor expired because it wasn't accessed for over 10 minutes, or
--   this cursor came from a different server than the one you are
--   currently connected to (perhaps a failover happened between servers
--   in a replica set)
CursorNotFoundFailure :: CursorId -> Failure

-- | Query failed for some reason as described in the string
QueryFailure :: ErrorCode -> String -> Failure

-- | Error observed by getLastError after a write, error description is in
--   string, index of failed document is the first argument
WriteFailure :: Int -> ErrorCode -> String -> Failure

-- | Write concern error. It's reported only by insert, update, delete
--   commands. Not by wire protocol.
WriteConcernFailure :: Int -> String -> Failure

-- | fetch found no document matching selection
DocNotFound :: Selection -> Failure

-- | aggregate returned an error
AggregateFailure :: String -> Failure

-- | When we need to aggregate several failures and report them.
CompoundFailure :: [Failure] -> Failure

-- | The structure of the returned documents doesn't match what we
--   expected
ProtocolFailure :: Int -> String -> Failure

-- | Error code from getLastError or query failure
type ErrorCode = Int

-- | Type of reads and writes to perform
data AccessMode

-- | Read-only action, reading stale data from a slave is OK.
ReadStaleOk :: AccessMode

-- | Read-write action, slave not OK, every write is fire & forget.
UnconfirmedWrites :: AccessMode

-- | Read-write action, slave not OK, every write is confirmed with
--   getLastError.
ConfirmWrites :: GetLastError -> AccessMode
For example ["w" =: 2] -- tells the server to wait for the write to reach at least two servers -- in replica set before acknowledging. See -- http://www.mongodb.org/display/DOCS/Last+Error+Commands for -- more options. type GetLastError = Document -- | Same as ConfirmWrites [] master :: AccessMode -- | Same as ReadStaleOk slaveOk :: AccessMode -- | Run action with given AccessMode accessMode :: (Monad m) => AccessMode -> Action m a -> Action m a liftDB :: (MonadReader env m, HasMongoContext env, MonadIO m) => Action IO a -> m a -- | Values needed when executing a db operation data MongoContext -- | operations query/update this database MongoContext :: Pipe -> AccessMode -> Database -> MongoContext -- | operations read/write to this pipelined TCP connection to a MongoDB -- server [mongoPipe] :: MongoContext -> Pipe -- | read/write operation will use this access mode [mongoAccessMode] :: MongoContext -> AccessMode [mongoDatabase] :: MongoContext -> Database class HasMongoContext env mongoContext :: HasMongoContext env => env -> MongoContext type Database = Text -- | List all databases residing on server allDatabases :: (MonadIO m) => Action m [Database] -- | Run action against given database useDb :: (Monad m) => Database -> Action m a -> Action m a -- | Current database in use thisDatabase :: (Monad m) => Action m Database type Username = Text type Password = Text -- | Authenticate with the current database (if server is running in secure -- mode). Return whether authentication was successful or not. -- Reauthentication is required for every new pipe. SCRAM-SHA-1 will be -- used for server versions 3.0+, MONGO-CR for lower versions. auth :: MonadIO m => Username -> Password -> Action m Bool -- | Authenticate with the current database, using the MongoDB-CR -- authentication mechanism (default in MongoDB server < 3.0) authMongoCR :: (MonadIO m) => Username -> Password -> Action m Bool -- | Authenticate with the current database, using the SCRAM-SHA-1 -- authentication mechanism (default in MongoDB server >= 3.0) authSCRAMSHA1 :: MonadIO m => Username -> Password -> Action m Bool -- | Collection name (not prefixed with database) type Collection = Text -- | List all collections in this database allCollections :: MonadIO m => Action m [Collection] -- | Selects documents in collection that match selector data Selection Select :: Selector -> Collection -> Selection [selector] :: Selection -> Selector [coll] :: Selection -> Collection -- | Filter for a query, analogous to the where clause in SQL. [] -- matches all documents in collection. ["x" =: a, "y" =: b] is -- analogous to where x = a and y = b in SQL. See -- http://www.mongodb.org/display/DOCS/Querying for full selector -- syntax. type Selector = Document -- | Add Javascript predicate to selector, in which case a document must -- match both selector and predicate whereJS :: Selector -> Javascript -> Selector class Select aQueryOrSelection -- | Query or Selection that selects documents in collection -- that match selector. The choice of type depends on use, for example, -- in find (select sel col) it is a Query, and in -- delete (select sel col) it is a Selection. 
-- | Insert document into collection and return its "_id" value, which is
--   created automatically if not supplied
insert :: (MonadIO m) => Collection -> Document -> Action m Value

-- | Same as insert except don't return _id
insert_ :: (MonadIO m) => Collection -> Document -> Action m ()

-- | Insert documents into collection and return their "_id" values, which
--   are created automatically if not supplied. If a document fails to be
--   inserted (eg. due to duplicate key) then remaining docs are aborted,
--   and LastError is set. An exception will be thrown if any error occurs.
insertMany :: (MonadIO m) => Collection -> [Document] -> Action m [Value]

-- | Same as insertMany except don't return _ids
insertMany_ :: (MonadIO m) => Collection -> [Document] -> Action m ()

-- | Insert documents into collection and return their "_id" values, which
--   are created automatically if not supplied. If a document fails to be
--   inserted (eg. due to duplicate key) then remaining docs are still
--   inserted.
insertAll :: (MonadIO m) => Collection -> [Document] -> Action m [Value]

-- | Same as insertAll except don't return _ids
insertAll_ :: (MonadIO m) => Collection -> [Document] -> Action m ()

-- | Save document to collection, meaning insert it if it's new (has no
--   "_id" field) or upsert it if it's not new (has "_id" field)
save :: (MonadIO m) => Collection -> Document -> Action m ()

-- | Replace first document in selection with given document
replace :: (MonadIO m) => Selection -> Document -> Action m ()

-- | Replace first document in selection with given document, or insert
--   document if selection is empty
-- | Deprecated: use upsert instead
repsert :: (MonadIO m) => Selection -> Document -> Action m ()

-- | Update first document in selection with given document, or insert
--   document if selection is empty
upsert :: (MonadIO m) => Selection -> Document -> Action m ()

-- | Update operations on fields in a document. See
--   http://www.mongodb.org/display/DOCS/Updating#Updating-ModifierOperations
type Modifier = Document

-- | Update all documents in selection using given modifier
modify :: (MonadIO m) => Selection -> Modifier -> Action m ()
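
-- A small usage sketch for upsert and modify (same assumptions as the
-- example at the end of this file; the collection, selectors and the
-- "$set" modifier are illustrative):
--
--   fixStateAbbrev :: Action IO ()
--   fixStateAbbrev = modify (select ["home.city" =: "New York"] "team")
--                           ["$set" =: ["home.state" =: "NY"]]
--
--   addOrReplaceMets :: Action IO ()
--   addOrReplaceMets = upsert (select ["name" =: "Mets"] "team")
--                             ["name" =: "Mets", "league" =: "National"]
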
-- | Bulk update operation. If one update fails it will not update the
--   remaining documents. Currently the returned value is only a
--   placeholder. With MongoDB server before 2.6 it will send update
--   requests one by one. In order to receive error messages in versions
--   under 2.6 you need to use confirmed writes. Otherwise, even if errors
--   occurred, the list of errors will be empty and the result will be
--   success. After 2.6 it will use the bulk update feature in MongoDB.
updateMany :: (MonadIO m) => Collection -> [(Selector, Document, [UpdateOption])] -> Action m WriteResult

-- | Bulk update operation. If one update fails it will proceed with the
--   remaining documents. With MongoDB server before 2.6 it will send
--   update requests one by one. In order to receive error messages in
--   versions under 2.6 you need to use confirmed writes. Otherwise, even
--   if errors occurred, the list of errors will be empty and the result
--   will be success. After 2.6 it will use the bulk update feature in
--   MongoDB.
updateAll :: (MonadIO m) => Collection -> [(Selector, Document, [UpdateOption])] -> Action m WriteResult

data WriteResult
WriteResult :: Bool -> Int -> Maybe Int -> Int -> [Upserted] -> [Failure] -> [Failure] -> WriteResult
[failed] :: WriteResult -> Bool
[nMatched] :: WriteResult -> Int
[nModified] :: WriteResult -> Maybe Int

-- | MongoDB server before 2.6 doesn't allow calculating this value. This
--   field is meaningless if we can't calculate the number of modified
--   documents.
[nRemoved] :: WriteResult -> Int
[upserted] :: WriteResult -> [Upserted]
[writeErrors] :: WriteResult -> [Failure]
[writeConcernErrors] :: WriteResult -> [Failure]

data UpdateOption

-- | If set, the database will insert the supplied object into the
--   collection if no matching document is found
Upsert :: UpdateOption

-- | If set, the database will update all matching objects in the
--   collection. Otherwise only updates first matching doc
MultiUpdate :: UpdateOption

data Upserted
Upserted :: Int -> ObjectId -> Upserted
[upsertedIndex] :: Upserted -> Int
[upsertedId] :: Upserted -> ObjectId

-- | Delete all documents in selection
delete :: (MonadIO m) => Selection -> Action m ()

-- | Delete first document in selection
deleteOne :: (MonadIO m) => Selection -> Action m ()

-- | Bulk delete operation. If one delete fails it will not delete the
--   remaining documents. Currently the returned value is only a
--   placeholder. With MongoDB server before 2.6 it will send delete
--   requests one by one. After 2.6 it will use the bulk delete feature in
--   MongoDB.
deleteMany :: (MonadIO m) => Collection -> [(Selector, [DeleteOption])] -> Action m WriteResult

-- | Bulk delete operation. If one delete fails it will proceed with the
--   remaining documents. Currently the returned value is only a
--   placeholder. With MongoDB server before 2.6 it will send delete
--   requests one by one. After 2.6 it will use the bulk delete feature in
--   MongoDB.
deleteAll :: (MonadIO m) => Collection -> [(Selector, [DeleteOption])] -> Action m WriteResult

data DeleteOption

-- | If set, the database will remove only the first matching document in
--   the collection. Otherwise all matching documents will be removed
SingleRemove :: DeleteOption
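
-- A small usage sketch for the bulk write operations above (same
-- assumptions as the example at the end of this file; collection,
-- selectors and field names are illustrative):
--
--   deactivateOldLeagues :: Action IO WriteResult
--   deactivateOldLeagues = updateMany "team"
--     [ (["league" =: "Federal"], ["$set" =: ["active" =: False]], [MultiUpdate])
--     , (["name" =: "Spiders"],   ["$set" =: ["active" =: False]], [])
--     ]
--
--   purgeInactive :: Action IO WriteResult
--   purgeInactive = deleteMany "team" [(["active" =: False], [])]
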
-- | Use select to create a basic query with defaults, then modify if
--   desired. For example, (select sel col) {limit = 10}
data Query
Query :: [QueryOption] -> Selection -> Projector -> Word32 -> Limit -> Order -> Bool -> BatchSize -> Order -> Query

-- | Default = []
[options] :: Query -> [QueryOption]
[selection] :: Query -> Selection

-- | [] = all fields. Default = []
[project] :: Query -> Projector

-- | Number of initial matching documents to skip. Default = 0
[skip] :: Query -> Word32

-- | Maximum number of documents to return, 0 = no limit. Default = 0
[limit] :: Query -> Limit

-- | Sort results by this order, [] = no sort. Default = []
[sort] :: Query -> Order

-- | If true, assures no duplicates are returned, and no objects missed,
--   that were present at both the start and end of the query's execution
--   (even if the object was updated). If an object is new during the
--   query, or deleted during the query, it may or may not be returned,
--   even with snapshot mode. Note that short query responses (less than
--   1MB) are always effectively snapshotted. Default = False
[snapshot] :: Query -> Bool

-- | The number of documents to return in each batch response from the
--   server. 0 means use Mongo default. Default = 0
[batchSize] :: Query -> BatchSize

-- | Force MongoDB to use this index, [] = no hint. Default = []
[hint] :: Query -> Order

data QueryOption

-- | Tailable means cursor is not closed when the last data is retrieved.
--   Rather, the cursor marks the final object's position. You can resume
--   using the cursor later, from where it was located, if more data were
--   received. Like any "latent cursor", the cursor may become invalid at
--   some point – for example if the final object it references were
--   deleted. Thus, you should be prepared to requery on CursorNotFound
--   exception.
TailableCursor :: QueryOption

-- | The server normally times out idle cursors after 10 minutes to
--   prevent a memory leak in case a client forgets to close a cursor. Set
--   this option to allow a cursor to live forever until it is closed.
NoCursorTimeout :: QueryOption

-- | Use with TailableCursor. If we are at the end of the data, block for
--   a while rather than returning no data. After a timeout period, we do
--   return as normal. (An Exhaust option, which would stream the data
--   down full blast in multiple "more" packages on the assumption that
--   the client will fully read all data queried, is commented out of the
--   source because it is not compatible with the current Pipeline
--   implementation; with it, the client would not be allowed to skip
--   reading all the data unless it closes the connection.)
AwaitData :: QueryOption

-- | Get partial results from a _mongos_ if some shards are down, instead
--   of throwing an error.
Partial :: QueryOption

-- | Fields to return, analogous to the select clause in SQL. [] means
--   return whole document (analogous to * in SQL). ["x" =: 1, "y" =: 1]
--   means return only x and y fields of each document. ["x" =: 0] means
--   return all fields except x.
type Projector = Document

-- | Maximum number of documents to return, i.e. cursor will close after
--   iterating over this number of documents. 0 means no limit.
type Limit = Word32

-- | Fields to sort by. Each one is associated with 1 or -1. Eg.
--   ["x" =: 1, "y" =: -1] means sort by x ascending then y descending
type Order = Document

-- | The number of documents to return in each batch response from the
--   server. 0 means use Mongo default.
type BatchSize = Word32

-- | Return performance stats of query execution
explain :: (MonadIO m) => Query -> Action m Document

-- | Fetch documents satisfying query
find :: MonadIO m => Query -> Action m Cursor

-- | Fetch first document satisfying query or Nothing if none satisfy it
findOne :: (MonadIO m) => Query -> Action m (Maybe Document)

-- | Same as findOne except throw DocNotFound if none match
fetch :: (MonadIO m) => Query -> Action m Document
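
-- A small usage sketch for building a Query from select and overriding
-- its defaulted fields (same assumptions as the example at the end of
-- this file; collection and field names are illustrative):
--
--   topNationalTeams :: Action IO [Document]
--   topNationalTeams = rest =<< find (select ["league" =: "National"] "team")
--     { project   = ["name" =: 1, "wins" =: 1]
--     , sort      = ["wins" =: (-1 :: Int)]
--     , limit     = 5
--     , batchSize = 5
--     }
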
-- | Runs the findAndModify command as an update without an upsert and new
--   set to true. Returns a single updated document (new option is set to
--   true).
--
--   See findAndModifyOpts if you want to use findAndModify in a different
--   way
findAndModify :: MonadIO m => Query -> Document -> Action m (Either String Document)

-- | Runs the findAndModify command, allows more options than
--   findAndModify
findAndModifyOpts :: MonadIO m => Query -> FindAndModifyOpts -> Action m (Either String (Maybe Document))

data FindAndModifyOpts
FamRemove :: Bool -> FindAndModifyOpts
FamUpdate :: Document -> Bool -> Bool -> FindAndModifyOpts
[famUpdate] :: FindAndModifyOpts -> Document
[famNew] :: FindAndModifyOpts -> Bool
[famUpsert] :: FindAndModifyOpts -> Bool
defFamUpdateOpts :: Document -> FindAndModifyOpts

-- | Fetch number of documents satisfying query (including effect of skip
--   and/or limit if present)
count :: (MonadIO m) => Query -> Action m Int

-- | Fetch distinct values of field in selected documents
distinct :: (MonadIO m) => Label -> Selection -> Action m [Value]

-- | Iterator over results of a query. Use next to iterate or rest to get
--   all results. A cursor is closed when it is explicitly closed, all
--   results have been read from it, garbage collected, or not used for
--   over 10 minutes (unless NoCursorTimeout option was specified in
--   Query). Reading from a closed cursor raises a CursorNotFoundFailure.
--   Note, a cursor is not closed when the pipe is closed, so you can open
--   another pipe to the same server and continue using the cursor.
data Cursor

-- | Return next batch of documents in query result, which will be empty
--   if finished.
nextBatch :: MonadIO m => Cursor -> Action m [Document]

-- | Return next document in query result, or Nothing if finished.
next :: MonadIO m => Cursor -> Action m (Maybe Document)

-- | Return next N documents or fewer if end is reached
nextN :: MonadIO m => Int -> Cursor -> Action m [Document]

-- | Return remaining documents in query result
rest :: MonadIO m => Cursor -> Action m [Document]

closeCursor :: MonadIO m => Cursor -> Action m ()
isCursorClosed :: MonadIO m => Cursor -> Action m Bool

-- | The Aggregate Pipeline
type Pipeline = [Document]

data AggregateConfig
AggregateConfig :: AggregateConfig

-- | Runs an aggregate and unpacks the result. See
--   http://docs.mongodb.org/manual/core/aggregation/ for details.
aggregate :: MonadIO m => Collection -> Pipeline -> Action m [Document]

-- | Runs an aggregate and unpacks the result. See
--   http://docs.mongodb.org/manual/core/aggregation/ for details.
aggregateCursor :: MonadIO m => Collection -> Pipeline -> AggregateConfig -> Action m Cursor
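
-- A small usage sketch for aggregate: a two-stage pipeline counting
-- teams per league (same assumptions as the example at the end of this
-- file; the $match/$group stages follow standard MongoDB aggregation
-- syntax, and the collection and field names are illustrative):
--
--   teamsPerLeague :: Action IO [Document]
--   teamsPerLeague = aggregate "team"
--     [ ["$match" =: ["active" =: True]]
--     , ["$group" =: ["_id" =: "$league", "count" =: ["$sum" =: (1 :: Int)]]]
--     ]
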
-- | Groups documents in collection by key then reduces (aggregates) each
--   group
data Group
Group :: Collection -> GroupKey -> Javascript -> Document -> Selector -> Maybe Javascript -> Group
[gColl] :: Group -> Collection

-- | Fields to group by
[gKey] :: Group -> GroupKey

-- | (doc, agg) -> (). The reduce function reduces (aggregates) the
--   objects iterated. Typical operations of a reduce function include
--   summing and counting. It takes two arguments, the current document
--   being iterated over and the aggregation value, and updates the
--   aggregate value.
[gReduce] :: Group -> Javascript

-- | agg. Initial aggregation value supplied to reduce
[gInitial] :: Group -> Document

-- | Condition that must be true for a row to be considered. [] means
--   always true.
[gCond] :: Group -> Selector

-- | agg -> () | result. An optional function to be run on each item in
--   the result set just before the item is returned. Can either modify
--   the item (e.g., add an average field given a count and a total) or
--   return a replacement object (returning a new object with just _id and
--   average fields).
[gFinalize] :: Group -> Maybe Javascript

-- | Fields to group by, or function (doc -> key) returning a "key
--   object" to be used as the grouping key. Use KeyF instead of Key to
--   specify a key that is not an existing member of the object (or, to
--   access embedded members).
data GroupKey
Key :: [Label] -> GroupKey
KeyF :: Javascript -> GroupKey

-- | Execute group query and return resulting aggregate value for each
--   distinct key
group :: (MonadIO m) => Group -> Action m [Document]
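
-- A small usage sketch for group: counting documents per league. The
-- Javascript constructor (scope document plus code) comes from
-- Data.Bson, which Database.MongoDB re-exports; all names and the
-- Javascript body are illustrative.
--
--   teamsPerLeagueGroup :: Action IO [Document]
--   teamsPerLeagueGroup = group Group
--     { gColl     = "team"
--     , gKey      = Key ["league"]
--     , gReduce   = Javascript [] "function (doc, agg) { agg.count += 1; }"
--     , gInitial  = ["count" =: (0 :: Int)]
--     , gCond     = []
--     , gFinalize = Nothing
--     }
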
-- | Maps every document in collection to a list of (key, value) pairs,
--   then for each unique key reduces all its associated values to a
--   single result. There are additional parameters that may be set to
--   tweak this basic operation. This implements the latest version of
--   map-reduce that requires MongoDB 1.7.4 or greater. To map-reduce
--   against an older server use runCommand directly as described in
--   http://www.mongodb.org/display/DOCS/MapReduce.
data MapReduce
MapReduce :: Collection -> MapFun -> ReduceFun -> Selector -> Order -> Limit -> MROut -> Maybe FinalizeFun -> Document -> Bool -> MapReduce
[rColl] :: MapReduce -> Collection
[rMap] :: MapReduce -> MapFun
[rReduce] :: MapReduce -> ReduceFun

-- | Operate on only those documents selected. Default is [] meaning all
--   documents.
[rSelect] :: MapReduce -> Selector

-- | Default is [] meaning no sort
[rSort] :: MapReduce -> Order

-- | Default is 0 meaning no limit
[rLimit] :: MapReduce -> Limit

-- | Output to a collection with a certain merge policy. Default is no
--   collection (Inline). Note, you don't want this default if your
--   result set is large.
[rOut] :: MapReduce -> MROut

-- | Function to apply to all the results when finished. Default is
--   Nothing.
[rFinalize] :: MapReduce -> Maybe FinalizeFun

-- | Variables (environment) that can be accessed from
--   map/reduce/finalize. Default is [].
[rScope] :: MapReduce -> Document

-- | Provide statistics on job execution time. Default is False.
[rVerbose] :: MapReduce -> Bool

-- | () -> void. The map function references the variable this to inspect
--   the current object under consideration. The function must call
--   emit(key,value) at least once, but may be invoked any number of
--   times, as may be appropriate.
type MapFun = Javascript

-- | (key, [value]) -> value. The reduce function receives a key and an
--   array of values and returns an aggregate result value. The MapReduce
--   engine may invoke reduce functions iteratively; thus, these functions
--   must be idempotent. That is, the following must hold for your reduce
--   function: reduce(k, [reduce(k,vs)]) == reduce(k,vs). If you need to
--   perform an operation only once, use a finalize function. The output
--   of emit (the 2nd param) and reduce should be the same format to make
--   iterative reduce possible.
type ReduceFun = Javascript

-- | (key, value) -> final_value. A finalize function may be run after
--   reduction. Such a function is optional and is not necessary for many
--   map/reduce cases. The finalize function takes a key and a value, and
--   returns a finalized value.
type FinalizeFun = Javascript

data MROut

-- | Return results directly instead of writing them to an output
--   collection. Results must fit within 16MB limit of a single document
Inline :: MROut

-- | Write results to given collection, in other database if specified.
--   Follow merge policy when entry already exists
Output :: MRMerge -> Collection -> (Maybe Database) -> MROut

data MRMerge

-- | Clear all old data and replace it with new data
Replace :: MRMerge

-- | Leave old data but overwrite entries with the same key with new data
Merge :: MRMerge

-- | Leave old data but combine entries with the same key via MR's reduce
--   function
Reduce :: MRMerge

-- | Result of running a MapReduce has some stats besides the output. See
--   http://www.mongodb.org/display/DOCS/MapReduce#MapReduce-Resultobject
type MRResult = Document

-- | MapReduce on collection with given map and reduce functions.
--   Remaining attributes are set to their defaults, which are stated in
--   their comments.
mapReduce :: Collection -> MapFun -> ReduceFun -> MapReduce

-- | Run MapReduce and return cursor of results. Error if map/reduce fails
--   (because of bad Javascript)
runMR :: MonadIO m => MapReduce -> Action m Cursor

-- | Run MapReduce and return a MR result document containing stats and
--   the results if Inlined. Error if the map/reduce failed (because of
--   bad Javascript).
runMR' :: (MonadIO m) => MapReduce -> Action m MRResult
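
-- A small usage sketch for mapReduce and runMR': the same per-league
-- count expressed as an inline map/reduce (same assumptions as the
-- example at the end of this file; the Javascript bodies are
-- illustrative):
--
--   leagueCountMR :: Action IO MRResult
--   leagueCountMR = runMR' $ mapReduce "team"
--     (Javascript [] "function () { emit(this.league, 1); }")
--     (Javascript [] "function (key, values) { return Array.sum(values); }")
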
-- | A command is a special query or action against the database. See
--   http://www.mongodb.org/display/DOCS/Commands for details.
type Command = Document

-- | Run command against the database and return its result
runCommand :: (MonadIO m) => Command -> Action m Document

-- | runCommand1 foo = runCommand [foo =: 1]
runCommand1 :: (MonadIO m) => Text -> Action m Document

-- | Run code on server
eval :: (MonadIO m, Val v) => Javascript -> Action m v

retrieveServerData :: (MonadIO m) => Action m ServerData

data ServerData
ServerData :: Bool -> Int -> Int -> Int -> Int -> Int -> ServerData
[isMaster] :: ServerData -> Bool
[minWireVersion] :: ServerData -> Int
[maxWireVersion] :: ServerData -> Int
[maxMessageSizeBytes] :: ServerData -> Int
[maxBsonObjectSize] :: ServerData -> Int
[maxWriteBatchSize] :: ServerData -> Int

instance GHC.Classes.Eq Database.MongoDB.Query.MapReduce
instance GHC.Show.Show Database.MongoDB.Query.MapReduce
instance GHC.Classes.Eq Database.MongoDB.Query.MROut
instance GHC.Show.Show Database.MongoDB.Query.MROut
instance GHC.Classes.Eq Database.MongoDB.Query.MRMerge
instance GHC.Show.Show Database.MongoDB.Query.MRMerge
instance GHC.Classes.Eq Database.MongoDB.Query.Group
instance GHC.Show.Show Database.MongoDB.Query.Group
instance GHC.Classes.Eq Database.MongoDB.Query.GroupKey
instance GHC.Show.Show Database.MongoDB.Query.GroupKey
instance GHC.Show.Show Database.MongoDB.Query.AggregateConfig
instance GHC.Show.Show Database.MongoDB.Query.FindAndModifyOpts
instance GHC.Classes.Eq Database.MongoDB.Query.Query
instance GHC.Show.Show Database.MongoDB.Query.Query
instance GHC.Classes.Eq Database.MongoDB.Query.ReadMode
instance GHC.Show.Show Database.MongoDB.Query.ReadMode
instance GHC.Classes.Eq Database.MongoDB.Query.WriteMode
instance GHC.Show.Show Database.MongoDB.Query.WriteMode
instance GHC.Show.Show Database.MongoDB.Query.WriteResult
instance GHC.Classes.Eq Database.MongoDB.Query.Failure
instance GHC.Show.Show Database.MongoDB.Query.Failure
instance GHC.Classes.Eq Database.MongoDB.Query.Selection
instance GHC.Show.Show Database.MongoDB.Query.Selection
instance GHC.Show.Show Database.MongoDB.Query.Upserted
instance GHC.Show.Show Database.MongoDB.Query.AccessMode
instance Data.Default.Class.Default Database.MongoDB.Query.AggregateConfig
instance Database.MongoDB.Query.Select Database.MongoDB.Query.Query
instance Database.MongoDB.Query.Select Database.MongoDB.Query.Selection
instance Database.MongoDB.Query.Result Database.MongoDB.Query.WriteResult
instance GHC.Exception.Exception Database.MongoDB.Query.Failure
instance Control.Monad.Trans.Error.Error Database.MongoDB.Query.Failure
instance Database.MongoDB.Query.HasMongoContext Database.MongoDB.Query.MongoContext
instance Database.MongoDB.Query.Result (Data.Either.Either a b)


-- | Connect to a single server or a replica set of servers
module Database.MongoDB.Connection

type Secs = Double

-- | Thread-safe TCP connection with pipelined requests
type Pipe = Pipeline

-- | Close pipe and underlying connection
close :: Pipeline -> IO ()

isClosed :: Pipeline -> IO Bool

data Host
Host :: HostName -> PortID -> Host

data PortID :: *
Service :: String -> PortID
PortNumber :: PortNumber -> PortID
UnixSocket :: String -> PortID

-- | Default MongoDB port = 27017
defaultPort :: PortID

-- | Host on defaultPort
host :: HostName -> Host

-- | Display host as "host:port". TODO: Distinguish Service and UnixSocket
--   port
showHostPort :: Host -> String

-- | Read string "hostname:port" as Host hostname (PortNumber port) or
--   "hostname" as host hostname (default port). Error if string does not
--   match either syntax.
readHostPort :: String -> Host
-- | Read string "hostname:port" as Host hostname (PortNumber port) or
--   "hostname" as host hostname (default port). Fail if string does not
--   match either syntax. TODO: handle Service and UnixSocket port
readHostPortM :: (Monad m) => String -> m Host

-- | connect (and openReplicaSet) fails if it can't connect within this
--   many seconds (default is 6 seconds). Use connect' (and
--   openReplicaSet') if you want to ignore this global and specify your
--   own timeout. Note, this timeout only applies to initial connection
--   establishment, not when reading/writing to the connection.
globalConnectTimeout :: IORef Secs

-- | Connect to Host returning pipelined TCP connection. Throw IOError if
--   connection refused or no response within globalConnectTimeout.
connect :: Host -> IO Pipe

-- | Connect to Host returning pipelined TCP connection. Throw IOError if
--   connection refused or no response within given number of seconds.
connect' :: Secs -> Host -> IO Pipe

type ReplicaSetName = Text

-- | Open connections (on demand) to servers in replica set. Supplied
--   hosts is seed list. At least one of them must be a live member of the
--   named replica set, otherwise fail. The value of globalConnectTimeout
--   at the time of this call is the timeout used for future member
--   connect attempts. To use your own value call openReplicaSet' instead.
openReplicaSet :: (ReplicaSetName, [Host]) -> IO ReplicaSet

-- | Open connections (on demand) to servers in replica set. Supplied
--   hosts is seed list. At least one of them must be a live member of the
--   named replica set, otherwise fail. Supplied seconds timeout is used
--   for connect attempts to members.
openReplicaSet' :: Secs -> (ReplicaSetName, [Host]) -> IO ReplicaSet

-- | Maintains a connection (created on demand) to each server in the
--   named replica set
data ReplicaSet

-- | Return connection to current primary of replica set. Fail if no
--   primary available.
primary :: ReplicaSet -> IO Pipe

-- | Return connection to a random secondary, or primary if no secondaries
--   available.
secondaryOk :: ReplicaSet -> IO Pipe

-- | Return a connection to a host using a user-supplied sorting function,
--   which sorts based on a tuple containing the host and a boolean
--   indicating whether the host is primary.
routedHost :: ((Host, Bool) -> (Host, Bool) -> IO Ordering) -> ReplicaSet -> IO Pipe

-- | Close all connections to replica set
closeReplicaSet :: ReplicaSet -> IO ()

-- | name of connected replica set
replSetName :: ReplicaSet -> Text

instance GHC.Classes.Ord Database.MongoDB.Connection.Host
instance GHC.Classes.Eq Database.MongoDB.Connection.Host
instance GHC.Show.Show Database.MongoDB.Connection.Host
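
-- A small usage sketch for the replica-set functions above: open the
-- set, read from the primary, then close everything. The set name
-- "rs0", the seed hosts and the database are illustrative; assumes
-- OverloadedStrings and import Database.MongoDB as in the example at
-- the end of this file.
--
--   readFromPrimary :: IO [Document]
--   readFromPrimary = do
--     rs    <- openReplicaSet ("rs0", [host "db1.example.com", host "db2.example.com"])
--     pipe  <- primary rs
--     teams <- access pipe master "baseball" (rest =<< find (select [] "team"))
--     closeReplicaSet rs
--     return teams
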
-- | Database administrative functions
module Database.MongoDB.Admin

data CollectionOption
Capped :: CollectionOption
MaxByteSize :: Int -> CollectionOption
MaxItems :: Int -> CollectionOption

-- | Create collection with given options. You only need to call this to
--   set options, otherwise a collection is created automatically on first
--   use with no options.
createCollection :: (MonadIO m) => [CollectionOption] -> Collection -> Action m Document

-- | Rename first collection to second collection
renameCollection :: (MonadIO m) => Collection -> Collection -> Action m Document

-- | Delete the given collection! Return True if collection existed (and
--   was deleted); return False if collection did not exist (and no
--   action).
dropCollection :: (MonadIO m) => Collection -> Action m Bool

-- | This operation takes a while
validateCollection :: (MonadIO m) => Collection -> Action m Document

data Index
Index :: Collection -> Order -> IndexName -> Bool -> Bool -> Maybe Int -> Index
[iColl] :: Index -> Collection
[iKey] :: Index -> Order
[iName] :: Index -> IndexName
[iUnique] :: Index -> Bool
[iDropDups] :: Index -> Bool
[iExpireAfterSeconds] :: Index -> Maybe Int

type IndexName = Text

-- | Spec of index of ordered keys on collection. Name is generated from
--   keys. Unique and dropDups are False.
index :: Collection -> Order -> Index

-- | Create index if we did not already create one. May be called
--   repeatedly with practically no performance hit, because we remember
--   if we already called this for the same index (although this memory
--   gets wiped out every 15 minutes, in case another client drops the
--   index and we want to create it again).
ensureIndex :: (MonadIO m) => Index -> Action m ()

-- | Create index on the server. This call goes to the server every time.
createIndex :: (MonadIO m) => Index -> Action m ()

-- | Remove the index
dropIndex :: (MonadIO m) => Collection -> IndexName -> Action m Document

-- | Get all indexes on this collection
getIndexes :: MonadIO m => Collection -> Action m [Document]

-- | Drop all indexes on this collection
dropIndexes :: (MonadIO m) => Collection -> Action m Document
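
-- A small usage sketch for the index functions above: declare a unique
-- index on the "name" field using the index helper plus a record update
-- (same assumptions as the example at the end of this file; collection
-- and field names are illustrative):
--
--   ensureTeamIndexes :: Action IO ()
--   ensureTeamIndexes = ensureIndex (index "team" ["name" =: 1]) {iUnique = True}
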
-- | Fetch all users of this database
allUsers :: MonadIO m => Action m [Document]

-- | Add user with password with read-only access if bool is True or
--   read-write access if bool is False
addUser :: (MonadIO m) => Bool -> Username -> Password -> Action m ()

removeUser :: (MonadIO m) => Username -> Action m ()

-- | "admin" database
admin :: Database

-- | Copy database from given host to the server I am connected to. Fails
--   and returns "ok" = 0 if we don't have permission to read from given
--   server (use copyDatabase in this case).
cloneDatabase :: (MonadIO m) => Database -> Host -> Action m Document

-- | Copy database from given host to the server I am connected to. If
--   username & password are supplied, use them to read from the given
--   host.
copyDatabase :: (MonadIO m) => Database -> Host -> Maybe (Username, Password) -> Database -> Action m Document

-- | Delete the given database!
dropDatabase :: (MonadIO m) => Database -> Action m Document

-- | Attempt to fix any corrupt records. This operation takes a while.
repairDatabase :: (MonadIO m) => Database -> Action m Document

serverBuildInfo :: (MonadIO m) => Action m Document
serverVersion :: (MonadIO m) => Action m Text
collectionStats :: (MonadIO m) => Collection -> Action m Document
dataSize :: (MonadIO m) => Collection -> Action m Int
storageSize :: (MonadIO m) => Collection -> Action m Int
totalIndexSize :: (MonadIO m) => Collection -> Action m Int
totalSize :: MonadIO m => Collection -> Action m Int

data ProfilingLevel
Off :: ProfilingLevel
Slow :: ProfilingLevel
All :: ProfilingLevel

getProfilingLevel :: (MonadIO m) => Action m ProfilingLevel

type MilliSec = Int

setProfilingLevel :: (MonadIO m) => ProfilingLevel -> Maybe MilliSec -> Action m ()

dbStats :: (MonadIO m) => Action m Document

type OpNum = Int

-- | See currently running operation on the database, if any
currentOp :: (MonadIO m) => Action m (Maybe Document)

killOp :: (MonadIO m) => OpNum -> Action m (Maybe Document)

serverStatus :: (MonadIO m) => Action m Document

instance GHC.Classes.Eq Database.MongoDB.Admin.ProfilingLevel
instance GHC.Enum.Enum Database.MongoDB.Admin.ProfilingLevel
instance GHC.Show.Show Database.MongoDB.Admin.ProfilingLevel
instance GHC.Classes.Eq Database.MongoDB.Admin.Index
instance GHC.Show.Show Database.MongoDB.Admin.Index
instance GHC.Classes.Eq Database.MongoDB.Admin.CollectionOption
instance GHC.Show.Show Database.MongoDB.Admin.CollectionOption


-- | Client interface to MongoDB database management system.
--
--   Simple example below.
--
-- {-# LANGUAGE OverloadedStrings #-}
-- {-# LANGUAGE ExtendedDefaultRules #-}
--
-- import Database.MongoDB
-- import Control.Monad.Trans (liftIO)
--
-- main :: IO ()
-- main = do
-- pipe <- connect (host "127.0.0.1")
-- e <- access pipe master "baseball" run
-- close pipe
-- print e
--
-- run :: Action IO ()
-- run = do
-- clearTeams
-- insertTeams
-- allTeams >>= printDocs "All Teams"
-- nationalLeagueTeams >>= printDocs "National League Teams"
-- newYorkTeams >>= printDocs "New York Teams"
--
-- clearTeams :: Action IO ()
-- clearTeams = delete (select [] "team")
--
-- insertTeams :: Action IO [Value]
-- insertTeams = insertMany "team" [
-- ["name" =: "Yankees", "home" =: ["city" =: "New York", "state" =: "NY"], "league" =: "American"],
-- ["name" =: "Mets", "home" =: ["city" =: "New York", "state" =: "NY"], "league" =: "National"],
-- ["name" =: "Phillies", "home" =: ["city" =: "Philadelphia", "state" =: "PA"], "league" =: "National"],
-- ["name" =: "Red Sox", "home" =: ["city" =: "Boston", "state" =: "MA"], "league" =: "American"] ]
--
-- allTeams :: Action IO [Document]
-- allTeams = rest =<< find (select [] "team") {sort = ["home.city" =: 1]}
--
-- nationalLeagueTeams :: Action IO [Document]
-- nationalLeagueTeams = rest =<< find (select ["league" =: "National"] "team")
--
-- newYorkTeams :: Action IO [Document]
-- newYorkTeams = rest =<< find (select ["home.state" =: "NY"] "team") {project = ["name" =: 1, "league" =: 1]}
--
-- printDocs :: String -> [Document] -> Action IO ()
-- printDocs title docs = liftIO $ putStrLn title >> mapM_ (print . exclude ["_id"]) docs
--
module Database.MongoDB
-- | MongoDB GridFS implementation
module Database.MongoDB.GridFS
-- | Files are stored in "buckets". You open a bucket with
-- openDefaultBucket or openBucket
data Bucket
files :: Bucket -> Text
chunks :: Bucket -> Text
data File
document :: File -> Document
bucket :: File -> Bucket
-- | Open the default Bucket (named "fs")
openDefaultBucket :: (Monad m, MonadIO m) => Action m Bucket
-- | Open a Bucket
openBucket :: (Monad m, MonadIO m) => Text -> Action m Bucket
-- | Find files in the bucket
findFile :: MonadIO m => Bucket -> Selector -> Action m [File]
-- | Find one file in the bucket
findOneFile :: MonadIO m => Bucket -> Selector -> Action m (Maybe File)
-- | Fetch one file in the bucket
fetchFile :: MonadIO m => Bucket -> Selector -> Action m File
-- | Delete files in the bucket
deleteFile :: (MonadIO m) => File -> Action m ()
-- | A producer for the contents of a file
sourceFile :: (Monad m, MonadIO m) => File -> Producer (Action m) ByteString
-- | A consumer that creates a file in the bucket and puts all consumed
-- data in it
sinkFile :: (Monad m, MonadIO m) => Bucket -> Text -> Consumer ByteString (Action m) File
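
-- A small usage sketch for sourceFile and sinkFile above. runConduit,
-- yield and .| come from the conduit package (older conduit versions
-- would use $$ instead); Data.ByteString, Data.Conduit.List and liftIO
-- (as in the Database.MongoDB example) are assumed imports, and the
-- file names are illustrative.
--
--   import qualified Data.ByteString as B
--   import qualified Data.Conduit.List as CL
--   import Data.Conduit (runConduit, yield, (.|))
--
--   uploadLogo :: Action IO File
--   uploadLogo = do
--     bkt      <- openDefaultBucket
--     contents <- liftIO (B.readFile "logo.png")
--     runConduit (yield contents .| sinkFile bkt "logo.png")
--
--   downloadLogo :: File -> Action IO ()
--   downloadLogo f = do
--     chunks <- runConduit (sourceFile f .| CL.consume)
--     liftIO (B.writeFile "logo-copy.png" (B.concat chunks))
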
-- | This module is for connecting to TLS-enabled MongoDB servers.
-- ATTENTION!!! Be aware that this module is highly experimental and
-- barely tested. The current implementation doesn't verify the server's
-- identity. It only allows you to connect to a MongoDB server using the
-- TLS protocol.
module Database.MongoDB.Transport.Tls
-- | Connect to mongodb using TLS
connect :: HostName -> PortID -> IO Pipe
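
-- A small usage sketch for the TLS connect above: open a TLS pipe,
-- authenticate and run an action. Host name, credentials and database
-- are illustrative; assumes OverloadedStrings and import Database.MongoDB
-- as in the example in that module.
--
--   import qualified Database.MongoDB.Transport.Tls as TLS
--
--   main :: IO ()
--   main = do
--     pipe <- TLS.connect "db.example.com" (PortNumber 27017)
--     ok   <- access pipe master "admin" (auth "username" "password")
--     print ok
--     close pipe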