-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/

-- | A driver for MongoDB
--
-- This module lets you connect to MongoDB, do inserts, queries, updates,
-- etc.
@package mongoDB
@version 0.7


-- | Miscellaneous general functions
module Database.MongoDB.Internal.Util
class (MonadIO m, Applicative m, Functor m) => MonadIO' m
ignore :: (Monad m) => a -> m ()

-- | Add element to end of list (snoc is the reverse of cons,
-- which adds to the front of a list)
snoc :: [a] -> a -> [a]
type Secs = Float

-- | Bit-or all numbers together
bitOr :: (Bits a) => [a] -> a

-- | Concat first and second together with a period in between. Eg.
-- "hello" <.> "world" = "hello.world"
(<.>) :: UString -> UString -> UString

-- | Repeatedly execute action, collecting results, until it returns Nothing
loop :: (Functor m, Monad m) => m (Maybe a) -> m [a]

-- | Is the field's value a 1 or True (MongoDB uses both Ints and Bools for
-- truth values)? Error if field is not in the document or the field is not
-- a Num or Bool.
true1 :: Label -> Document -> Bool
instance Ord PortID
instance Eq PortID
instance Show PortID
instance (MonadIO m, Applicative m, Functor m) => MonadIO' m
instance (Monad m, Error e) => Applicative (ErrorT e m)
instance (Monad m) => Applicative (ReaderT r m)


-- | Pipelining is sending multiple requests over a socket and receiving
-- the responses later, in the same order. This is faster than sending
-- one request, waiting for the response, then sending the next request,
-- and so on. This implementation returns a promise (future)
-- response for each request that when invoked waits for the response if
-- not already arrived. Multiple threads can send on the same pipe (and
-- get promises back); the pipe will pipeline each thread's request right
-- away without waiting.
module Control.Pipeline

-- | Thread-safe and pipelined socket
data Pipe handle bytes

-- | Create new Pipe with given encodeInt, decodeInt, and handle. You
-- should close pipe when finished, which will also close handle.
-- If pipe is not closed but eventually garbage collected, it will be
-- closed along with handle.
newPipe :: (Stream h b, Resource IO h) => (Size -> b) -> (b -> Size) -> h -> IO (Pipe h b)

-- | Send messages all together to destination (no messages will be
-- interleaved between them). None of the messages can induce a response,
-- i.e. the destination must not reply to any of these messages
-- (otherwise future calls will get these responses instead of
-- their own). Each message is preceded by its length when written to
-- socket.
send :: (Stream h b) => Pipe h b -> [b] -> IO ()

-- | Send messages all together to destination (no messages will be
-- interleaved between them), and return a promise of the response from
-- one message only. One and only one message in the list must induce a
-- response, i.e. the destination must reply to exactly one message only
-- (otherwise promises will have the wrong responses in them). Each
-- message is preceded by its length when written to socket. Likewise,
-- the response must be preceded by its length.
call :: (Stream h b) => Pipe h b -> [b] -> IO (IO b)
type Size = Int
class Length list
length :: (Length list) => list -> Size
class Resource m r
close :: (Resource m r) => r -> m ()
isClosed :: (Resource m r) => r -> m Bool
class Flush handle
flush :: (Flush handle) => handle -> IO ()
class (Length bytes, Monoid bytes, Flush handle) => Stream handle bytes
put :: (Stream handle bytes) => handle -> bytes -> IO ()
get :: (Stream handle bytes) => handle -> Int -> IO bytes

-- | Read N bytes from handle, blocking until all N bytes are read. If EOF
-- is reached before N bytes then throw an EOF exception.
getN :: (Stream h b) => h -> Int -> IO b
instance (Resource IO h) => Resource IO (Pipe h b)
instance Stream Handle ByteString
instance Stream Handle ByteString
instance Flush Handle
instance Resource IO Handle
instance Length ByteString
instance Length ByteString


-- | Low-level messaging between this client and the MongoDB server. See
-- Mongo Wire Protocol
-- (http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol).
--
-- This module is not intended for direct use. Use the high-level
-- interface at Database.MongoDB.Query and
-- Database.MongoDB.Connection instead.
module Database.MongoDB.Internal.Protocol

-- | Thread-safe TCP connection to server with pipelined requests
type Connection = Pipe Handle ByteString

-- | New thread-safe pipelined connection over handle
mkConnection :: Handle -> IO Connection

-- | Send notices as a contiguous batch to server with no reply. Raise
-- IOError if connection fails.
send :: Connection -> [Notice] -> IO ()

-- | Send notices and request as a contiguous batch to server and return
-- reply promise, which will block when invoked until reply arrives. This
-- call and resulting promise will raise IOError if connection fails.
call :: Connection -> [Notice] -> Request -> IO (IO Reply)

-- | Database name and collection name with period (.) in between. Eg.
-- "myDb.myCollection"
type FullCollection = UString

-- | A notice is a message that is sent with no reply
data Notice
Insert :: FullCollection -> [Document] -> Notice
iFullCollection :: Notice -> FullCollection
iDocuments :: Notice -> [Document]
Update :: FullCollection -> [UpdateOption] -> Document -> Document -> Notice
uFullCollection :: Notice -> FullCollection
uOptions :: Notice -> [UpdateOption]
uSelector :: Notice -> Document
uUpdater :: Notice -> Document
Delete :: FullCollection -> [DeleteOption] -> Document -> Notice
dFullCollection :: Notice -> FullCollection
dOptions :: Notice -> [DeleteOption]
dSelector :: Notice -> Document
KillCursors :: [CursorId] -> Notice
kCursorIds :: Notice -> [CursorId]
data UpdateOption

-- | If set, the database will insert the supplied object into the
-- collection if no matching document is found
Upsert :: UpdateOption

-- | If set, the database will update all matching objects in the
-- collection. Otherwise only updates the first matching doc
MultiUpdate :: UpdateOption
data DeleteOption

-- | If set, the database will remove only the first matching document in
-- the collection. Otherwise all matching documents will be removed
SingleRemove :: DeleteOption
type CursorId = Int64

-- | A request is a message that is sent with a Reply returned
data Request
Query :: [QueryOption] -> FullCollection -> Int32 -> Int32 -> Document -> Document -> Request
qOptions :: Request -> [QueryOption]
qFullCollection :: Request -> FullCollection

-- | Number of initial matching documents to skip
qSkip :: Request -> Int32
-- | The number of documents to return in each batch response from the
-- server. 0 means use Mongo default. Negative means close cursor after
-- first batch and use absolute value as batch size.
qBatchSize :: Request -> Int32

-- | [] = return all documents in collection
qSelector :: Request -> Document

-- | [] = return whole document
qProjector :: Request -> Document
GetMore :: FullCollection -> Int32 -> CursorId -> Request
gFullCollection :: Request -> FullCollection
gBatchSize :: Request -> Int32
gCursorId :: Request -> CursorId
data QueryOption

-- | Tailable means the cursor is not closed when the last data is
-- retrieved. Rather, the cursor marks the final object's position. You
-- can resume using the cursor later, from where it was located, if more
-- data were received. Like any latent cursor, the cursor may become
-- invalid at some point, for example if the final object it references
-- were deleted. Thus, you should be prepared to requery on CursorNotFound
-- exception.
TailableCursor :: QueryOption

-- | Allow query of a replica slave. Normally these return an error except
-- for namespace local.
SlaveOK :: QueryOption
NoCursorTimeout :: QueryOption

-- | Use with TailableCursor. If we are at the end of the data, block for a
-- while rather than returning no data. After a timeout period, we do
-- return as normal.
--
-- (Note: the wire protocol also defines an Exhaust option, which streams
-- the data down full blast in multiple "more" packages on the assumption
-- that the client will fully read all data queried. It is faster when you
-- are pulling a lot of data and know you want to pull it all down, but the
-- client is not allowed to stop reading the data without closing the
-- connection. It is not exposed as a constructor here.)
AwaitData :: QueryOption

-- | A reply is a message received in response to a Request
data Reply
Reply :: [ResponseFlag] -> CursorId -> Int32 -> [Document] -> Reply
rResponseFlags :: Reply -> [ResponseFlag]

-- | 0 = cursor finished
rCursorId :: Reply -> CursorId
rStartingFrom :: Reply -> Int32
rDocuments :: Reply -> [Document]
data ResponseFlag

-- | Set when getMore is called but the cursor id is not valid at the
-- server. Returned with zero results.
CursorNotFound :: ResponseFlag

-- | Query error. Returned with one document containing an $err
-- field holding the error message.
QueryError :: ResponseFlag

-- | For backward compatibility: set when the server supports the AwaitData
-- query option. If it doesn't, a replica slave client should sleep a
-- little between getMore's
AwaitCapable :: ResponseFlag
type Username = UString
type Password = UString
type Nonce = UString
pwHash :: Username -> Password -> UString
pwKey :: Nonce -> Username -> Password -> UString
instance Show ResponseFlag
instance Eq ResponseFlag
instance Enum ResponseFlag
instance Show Reply
instance Eq Reply
instance Show QueryOption
instance Eq QueryOption
instance Show Request
instance Eq Request
instance Show DeleteOption
instance Eq DeleteOption
instance Show UpdateOption
instance Eq UpdateOption
instance Show Notice
instance Eq Notice
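
-- For orientation only (the high-level interface in Database.MongoDB.Query
-- is what you would normally use), a hedged sketch of the wire-level
-- send/call pattern, assuming a Handle already connected to mongod and
-- OverloadedStrings for the UString literals:
--
-- conn <- mkConnection handle
-- send conn [Insert "db.coll" [["x" =: (1 :: Int)]]]        -- fire-and-forget notice
-- promise <- call conn [] (Query [] "db.coll" 0 0 [] [])    -- request everything in db.coll
-- Reply _flags _cursorId _from docs <- promise              -- blocks until the reply arrives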
-- | This is just like Control.Monad.Error.Class except you can
-- throw/catch the error of any ErrorT in the monad stack instead of just
-- the top one, as long as the error types are different. If two or more
-- ErrorTs in the stack have the same error type you get the error of the
-- top one.
module Control.Monad.Throw

-- | Same as MonadError but without the functional dependency, so the
-- same monad can have multiple errors with different types
class (Monad m) => Throw e m
throw :: (Throw e m) => e -> m a
catch :: (Throw e m) => m a -> (e -> m a) -> m a

-- | Execute action and throw exception if result is Left, otherwise return
-- the Right result
throwLeft :: (Throw e m) => m (Either e a) -> m a
instance [overlap ok] (Throw e m) => Throw e (ReaderT x m)
instance [overlap ok] (Error e, Throw e m, Error x) => Throw e (ErrorT x m)
instance [overlap ok] (Error e, Monad m) => Throw e (ErrorT e m)
instance [overlap ok] (Error e) => Throw e (Either e)


-- | A replica set is a set of servers that mirror each other (a
-- non-replicated server can act like a replica set of one). One server
-- in a replica set is the master and the rest are slaves. When the
-- master goes down, one of the slaves becomes master. The ReplicaSet
-- object in this client maintains a list of servers that it currently
-- knows are in the set. It refreshes this list every time it establishes
-- a new connection with one of the servers in the set. Each server in
-- the set knows who the other members in the set are, and who is master.
-- The user asks the ReplicaSet object for a new master or slave
-- connection. When a connection fails, the user must ask the ReplicaSet
-- for a new connection (which most likely will connect to another server,
-- since the previous one failed). When connecting to a new server you
-- lose all session state that was stored with the old server, which
-- includes open cursors and temporary map-reduce output collections.
-- Attempting to read from a lost cursor on a new server will raise a
-- ServerFailure exception. Attempting to read a lost map-reduce temp
-- output on a new server will return an empty set (not an error, like it
-- maybe should).
module Database.MongoDB.Connection

-- | Execute action that raises IOError only on network problem. Other
-- IOErrors like file access errors are not caught by this.
runNet :: ErrorT IOError m a -> m (Either IOError a)
data Host
Host :: HostName -> PortID -> Host
data PortID :: *
Service :: String -> PortID
PortNumber :: PortNumber -> PortID
UnixSocket :: String -> PortID

-- | Host on default MongoDB port
host :: HostName -> Host

-- | Display host as "host:port"
showHostPort :: Host -> String

-- | Read string "hostname:port" as Host hostname port or
-- "hostname" as host hostname (default port). Error if string
-- does not match either syntax.
readHostPort :: String -> Host

-- | Read string "hostname:port" as Host hostname port or
-- "hostname" as host hostname (default port). Fail if string
-- does not match either syntax.
readHostPortM :: (Monad m) => String -> m Host

-- | Reference to a replica set of hosts. Ok if really not a replica set
-- and just a stand-alone server, in which case it acts like a replica
-- set of one.
data ReplicaSet

-- | Create a reference to a replica set with given hosts as the initial
-- seed list (a subset of the hosts in the replica set)
replicaSet :: [Host] -> IO ReplicaSet

-- | Return current list of known hosts in replica set. This list is
-- updated on every newConnection.
replicas :: ReplicaSet -> IO [Host]
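
-- For example, a hedged sketch of building a seed list (the host names are
-- hypothetical; host uses the default MongoDB port, 27017):
--
-- rs <- replicaSet [host "db1.example.com", readHostPort "db2.example.com:27018"]
-- hosts <- replicas rs    -- known members so far; grows as new connections are made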
-- | Create a connection to a master or slave in the replica set. Throw
-- IOError if failed to connect to any host in the replica set that is the
-- right master/slave type. Close the connection when you are done
-- using it, even if a failure is raised. Garbage collected connections
-- will be closed automatically (but don't rely on this when creating
-- many connections). TODO: prefer slave over master when SlaveOk and
-- both are available.
newConnection :: (Throw IOError m, MonadIO' m) => MasterOrSlaveOk -> ReplicaSet -> m Connection
data MasterOrSlaveOk

-- | Connect to master only
Master :: MasterOrSlaveOk

-- | Connect to a slave, or master if no slave available
SlaveOk :: MasterOrSlaveOk

-- | Thread-safe TCP connection to server with pipelined requests
type Connection = Pipe Handle ByteString

-- | Create a connection to the given host (as opposed to connecting to
-- some host in a replica set via newConnection). Throw IOError if
-- can't connect.
connect :: (Throw IOError m, MonadIO' m) => Host -> m Connection
class Resource m r
close :: (Resource m r) => r -> m ()
isClosed :: (Resource m r) => r -> m Bool
instance Show MasterOrSlaveOk
instance Eq MasterOrSlaveOk
instance Show ReplicaInfo
instance Eq ReplicaInfo
instance Show Host
instance Eq Host
instance Ord Host


-- | This is just like Control.Monad.Reader.Class except you can
-- access the context of any Reader in the monad stack instead of just
-- the top one, as long as the context types are different. If two or more
-- readers in the stack have the same context type you get the context of
-- the top one.
module Control.Monad.Context

-- | Same as MonadReader but without the functional dependency, so the
-- same monad can have multiple contexts with different types
class (Monad m) => Context x m
context :: (Context x m) => m x
push :: (Context x m) => (x -> x) -> m a -> m a
instance [overlap ok] (Context x m, Error e) => Context x (ErrorT e m)
instance [overlap ok] (Context x m) => Context x (ReaderT r m)
instance [overlap ok] (Monad m) => Context x (ReaderT x m)


-- | Query and update documents residing on MongoDB server(s)
module Database.MongoDB.Query

-- | Monad with access to a Connection, MasterOrSlaveOk, and
-- WriteMode, and throws a Failure on read/write failure
-- and IOError on connection failure
data Connected m a

-- | Run action with access to connection. It starts out assuming it is
-- master (invoke slaveOk inside it to change that) and that
-- writes don't need to be checked (invoke writeMode to change
-- that). Return Left Failure if error in execution. Throws IOError if
-- connection fails during execution.
runConn :: Connected m a -> Connection -> m (Either Failure a)

-- | A monad with access to a Connection, MasterOrSlaveOk,
-- and WriteMode, and throws Failure on read/write failure
-- and IOError on connection failure
class (Context Connection m, Context MasterOrSlaveOk m, Context WriteMode m, Throw Failure m, Throw IOError m, MonadIO' m) => Conn m
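
-- For example, a hedged sketch of the plumbing around runConn (myAction is
-- a hypothetical value of type (Conn m) => m a, such as a query built with
-- the functions below):
--
-- runExample :: Connection -> IO ()
-- runExample conn = do
--    e <- runConn myAction conn            -- Left Failure on read/write error
--    either (fail . show) (const (return ())) e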
-- | Read or write exception like cursor expired or inserting a duplicate
-- key. Note, unexpected data from the server is not a Failure, rather it
-- is a programming error (you should call error in this case)
-- because the client and server are incompatible and a programming
-- change is required.
data Failure

-- | Cursor expired because it wasn't accessed for over 10 minutes, or this
-- cursor came from a different server than the one you are currently
-- connected to (perhaps a failover happened between servers in a replica
-- set)
CursorNotFoundFailure :: CursorId -> Failure

-- | Query failed for some reason as described in the string
QueryFailure :: String -> Failure

-- | Error observed by getLastError after a write, error description is in
-- string
WriteFailure :: ErrorCode -> String -> Failure

-- | Database name
type Database = UString

-- | List all databases residing on server
allDatabases :: (Conn m) => m [Database]

-- | A Conn monad with access to a Database
class (Context Database m, Conn m) => DbConn m

-- | Run Db action against given database
useDb :: Database -> ReaderT Database m a -> m a

-- | Current database in use
thisDatabase :: (DbConn m) => m Database
type Username = UString
type Password = UString

-- | Authenticate with the database (if server is running in secure mode).
-- Return whether authentication was successful or not. Reauthentication
-- is required for every new connection.
auth :: (DbConn m) => Username -> Password -> m Bool

-- | Collection name (not prefixed with database)
type Collection = UString

-- | List all collections in this database
allCollections :: (DbConn m) => m [Collection]

-- | Selects documents in collection that match selector
data Selection
Select :: Selector -> Collection -> Selection
selector :: Selection -> Selector
coll :: Selection -> Collection

-- | Filter for a query, analogous to the where clause in SQL. []
-- matches all documents in collection. [x =: a, y =: b] is
-- analogous to where x = a and y = b in SQL. See
-- http://www.mongodb.org/display/DOCS/Querying for full selector
-- syntax.
type Selector = Document

-- | Add Javascript predicate to selector, in which case a document must
-- match both selector and predicate
whereJS :: Selector -> Javascript -> Selector
class Select aQueryOrSelection
select :: (Select aQueryOrSelection) => Selector -> Collection -> aQueryOrSelection

-- | Default write-mode is Unsafe
data WriteMode

-- | Submit writes without receiving acknowledgments. Fast. Assumes writes
-- succeed even though they may not.
Unsafe :: WriteMode

-- | Receive an acknowledgment after every write, and raise exception if
-- one says the write failed.
Safe :: WriteMode

-- | Run action with given WriteMode
writeMode :: (Conn m) => WriteMode -> m a -> m a

-- | Insert document into collection and return its "_id" value, which is
-- created automatically if not supplied
insert :: (DbConn m) => Collection -> Document -> m Value

-- | Same as insert except don't return _id
insert_ :: (DbConn m) => Collection -> Document -> m ()

-- | Insert documents into collection and return their "_id" values, which
-- are created automatically if not supplied
insertMany :: (DbConn m) => Collection -> [Document] -> m [Value]

-- | Same as insertMany except don't return _ids
insertMany_ :: (DbConn m) => Collection -> [Document] -> m ()

-- | Save document to collection, meaning insert it if it's new (has no
-- "_id" field) or update it if it's not new (has an "_id" field)
save :: (DbConn m) => Collection -> Document -> m ()

-- | Replace first document in selection with given document
replace :: (DbConn m) => Selection -> Document -> m ()

-- | Replace first document in selection with given document, or insert
-- document if selection is empty
repsert :: (DbConn m) => Selection -> Document -> m ()
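
-- For example, a hedged sketch of acknowledged writes (the collection and
-- field names are arbitrary; assumes OverloadedStrings for the UString
-- literals and =: from Data.Bson):
--
-- addTeam :: (DbConn m) => m Value
-- addTeam = writeMode Safe $
--    insert "team" ["name" =: "Giants", "league" =: "National"]
--
-- setHome :: (DbConn m) => m ()
-- setHome = replace (select ["name" =: "Giants"] "team")
--    ["name" =: "Giants", "home" =: ["city" =: "San Francisco", "state" =: "CA"]]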
-- | Update operations on fields in a document. See
-- http://www.mongodb.org/display/DOCS/Updating#Updating-ModifierOperations
type Modifier = Document

-- | Update all documents in selection using given modifier
modify :: (DbConn m) => Selection -> Modifier -> m ()

-- | Delete all documents in selection
delete :: (DbConn m) => Selection -> m ()

-- | Delete first document in selection
deleteOne :: (DbConn m) => Selection -> m ()

-- | Ok to execute given action against a slave, i.e. eventually consistent
-- reads
slaveOk :: (Conn m) => m a -> m a

-- | Use select to create a basic query with defaults, then modify
-- if desired. For example, (select sel col) {limit = 10}
data Query
Query :: [QueryOption] -> Selection -> Projector -> Word32 -> Limit -> Order -> Bool -> BatchSize -> Order -> Query

-- | Default = []
options :: Query -> [QueryOption]
selection :: Query -> Selection

-- | [] = all fields. Default = []
project :: Query -> Projector

-- | Number of initial matching documents to skip. Default = 0
skip :: Query -> Word32

-- | Maximum number of documents to return, 0 = no limit. Default = 0
limit :: Query -> Limit

-- | Sort results by this order, [] = no sort. Default = []
sort :: Query -> Order

-- | If true, assures no duplicates are returned, or objects missed, which
-- were present at both the start and end of the query's execution (even
-- if the object was updated). If an object is new during the query, or
-- deleted during the query, it may or may not be returned, even with
-- snapshot mode. Note that short query responses (less than 1MB) are
-- always effectively snapshotted. Default = False
snapshot :: Query -> Bool

-- | The number of documents to return in each batch response from the
-- server. 0 means use Mongo default. Default = 0
batchSize :: Query -> BatchSize

-- | Force MongoDB to use this index, [] = no hint. Default = []
hint :: Query -> Order
data QueryOption

-- | Tailable means the cursor is not closed when the last data is
-- retrieved. Rather, the cursor marks the final object's position. You
-- can resume using the cursor later, from where it was located, if more
-- data were received. Like any latent cursor, the cursor may become
-- invalid at some point, for example if the final object it references
-- were deleted. Thus, you should be prepared to requery on CursorNotFound
-- exception.
TailableCursor :: QueryOption
NoCursorTimeout :: QueryOption

-- | Use with TailableCursor. If we are at the end of the data, block for a
-- while rather than returning no data. After a timeout period, we do
-- return as normal.
AwaitData :: QueryOption

-- | Fields to return, analogous to the select clause in SQL. []
-- means return whole document (analogous to * in SQL). [x =: 1, y =: 1]
-- means return only x and y fields of each
-- document. [x =: 0] means return all fields except x.
type Projector = Document

-- | Maximum number of documents to return, i.e. cursor will close after
-- iterating over this number of documents. 0 means no limit.
type Limit = Word32

-- | Fields to sort by. Each one is associated with 1 or -1. Eg.
-- [x =: 1, y =: -1] means sort by x ascending then y
-- descending
type Order = Document
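
-- For example, a hedged sketch of building and running a query (find and
-- rest are documented below; the collection and field names are arbitrary;
-- assumes OverloadedStrings):
--
-- nationalByName :: (DbConn m) => m [Document]
-- nationalByName = rest =<< find
--    (select ["league" =: "National"] "team")
--       {project = ["name" =: 1], sort = ["name" =: 1], limit = 10}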
-- | The number of documents to return in each batch response from the
-- server. 0 means use Mongo default.
type BatchSize = Word32

-- | Return performance stats of query execution
explain :: (DbConn m) => Query -> m Document

-- | Fetch documents satisfying query
find :: (DbConn m) => Query -> m Cursor

-- | Fetch first document satisfying query or Nothing if none satisfy it
findOne :: (DbConn m) => Query -> m (Maybe Document)

-- | Fetch number of documents satisfying query (including effect of skip
-- and/or limit if present)
count :: (DbConn m) => Query -> m Int

-- | Fetch distinct values of field in selected documents
distinct :: (DbConn m) => Label -> Selection -> m [Value]

-- | Iterator over results of a query. Use next to iterate or
-- rest to get all results. A cursor is closed when it is
-- explicitly closed, all results have been read from it, it is garbage
-- collected, or it has not been used for over 10 minutes (unless the
-- NoCursorTimeout option was specified in Query). Reading
-- from a closed cursor raises a CursorNotFoundFailure. Note, a
-- cursor is not closed when the connection is closed, so you can open
-- another connection to the same server and continue using the cursor.
data Cursor

-- | Return next document in query result, or Nothing if finished.
next :: (Conn m) => Cursor -> m (Maybe Document)

-- | Return next N documents, or fewer if the end is reached
nextN :: (Conn m) => Int -> Cursor -> m [Document]

-- | Return remaining documents in query result
rest :: (Conn m) => Cursor -> m [Document]

-- | Groups documents in collection by key then reduces (aggregates) each
-- group
data Group
Group :: Collection -> GroupKey -> Javascript -> Document -> Selector -> Maybe Javascript -> Group
gColl :: Group -> Collection

-- | Fields to group by
gKey :: Group -> GroupKey

-- | (doc, agg) -> (). The reduce function reduces (aggregates)
-- the objects iterated. Typical operations of a reduce function include
-- summing and counting. It takes two arguments, the current document
-- being iterated over and the aggregation value, and updates the
-- aggregate value.
gReduce :: Group -> Javascript

-- | agg. Initial aggregation value supplied to reduce
gInitial :: Group -> Document

-- | Condition that must be true for a row to be considered. [] means
-- always true.
gCond :: Group -> Selector

-- | agg -> () | result. An optional function to be run on each
-- item in the result set just before the item is returned. Can either
-- modify the item (e.g., add an average field given a count and a total)
-- or return a replacement object (returning a new object with just _id
-- and average fields).
gFinalize :: Group -> Maybe Javascript

-- | Fields to group by, or function (doc -> key) returning a
-- key object to be used as the grouping key. Use KeyF instead of
-- Key to specify a key that is not an existing member of the object (or,
-- to access embedded members).
data GroupKey
Key :: [Label] -> GroupKey
KeyF :: Javascript -> GroupKey

-- | Execute group query and return resulting aggregate value for each
-- distinct key
group :: (DbConn m) => Group -> m [Document]

-- | Maps every document in collection to a list of (key, value) pairs,
-- then for each unique key reduces all its associated values from all
-- lists to a single result. There are additional parameters that may be
-- set to tweak this basic operation.
data MapReduce
MapReduce :: Collection -> MapFun -> ReduceFun -> Selector -> Order -> Limit -> Maybe Collection -> Bool -> Maybe FinalizeFun -> Document -> Bool -> MapReduce
rColl :: MapReduce -> Collection
rMap :: MapReduce -> MapFun
rReduce :: MapReduce -> ReduceFun
-- | Operate on only those documents selected. Default is [] meaning all
-- documents.
rSelect :: MapReduce -> Selector

-- | Default is [] meaning no sort
rSort :: MapReduce -> Order

-- | Default is 0 meaning no limit
rLimit :: MapReduce -> Limit

-- | Output to given permanent collection, otherwise output to a new
-- temporary collection whose name is returned.
rOut :: MapReduce -> Maybe Collection

-- | If True, the temporary output collection is made permanent. If False,
-- the temporary output collection persists for the life of the current
-- connection only; however, other connections may read from it while the
-- original one is still alive. Note, reading from a temporary collection
-- after its original connection dies returns an empty result (not an
-- error). The default for this attribute is False, unless rOut is
-- specified, in which case the collection is made permanent.
rKeepTemp :: MapReduce -> Bool

-- | Function to apply to all the results when finished. Default is
-- Nothing.
rFinalize :: MapReduce -> Maybe FinalizeFun

-- | Variables (environment) that can be accessed from
-- map/reduce/finalize. Default is [].
rScope :: MapReduce -> Document

-- | Provide statistics on job execution time. Default is False.
rVerbose :: MapReduce -> Bool

-- | () -> void. The map function references the variable
-- this to inspect the current object under consideration. The
-- function must call emit(key,value) at least once, but may be
-- invoked any number of times, as may be appropriate.
type MapFun = Javascript

-- | (key, value_array) -> value. The reduce function receives
-- a key and an array of values and returns an aggregate result value.
-- The MapReduce engine may invoke reduce functions iteratively; thus,
-- these functions must be idempotent. That is, the following must hold
-- for your reduce function: for all k, vals : reduce(k,
-- [reduce(k,vals)]) == reduce(k,vals). If you need to perform an
-- operation only once, use a finalize function. The output of emit (the
-- 2nd param) and reduce should be the same format to make iterative
-- reduce possible.
type ReduceFun = Javascript

-- | (key, value) -> final_value. A finalize function may be
-- run after reduction. Such a function is optional and is not necessary
-- for many map/reduce cases. The finalize function takes a key and a
-- value, and returns a finalized value.
type FinalizeFun = Javascript

-- | MapReduce on collection with given map and reduce functions. Remaining
-- attributes are set to their defaults, which are stated in their
-- comments.
mapReduce :: Collection -> MapFun -> ReduceFun -> MapReduce

-- | Run MapReduce and return cursor of results. Error if map/reduce fails
-- (because of bad Javascript). TODO: Delete temp result collection when
-- cursor closes. Until then, it will be deleted by the server when
-- connection closes.
runMR :: (DbConn m) => MapReduce -> m Cursor

-- | Run MapReduce and return a result document containing a result
-- field holding the output Collection and additional statistic fields.
-- Error if the map/reduce failed (because of bad Javascript).
runMR' :: (DbConn m) => MapReduce -> m Document

-- | A command is a special query or action against the database. See
-- http://www.mongodb.org/display/DOCS/Commands for details.
type Command = Document

-- | Run command against the database and return its result
runCommand :: (DbConn m) => Command -> m Document
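
-- For example, a hedged sketch of summing scores per team (the collection
-- and field names are arbitrary; assumes the Javascript constructor from
-- Data.Bson, taking an environment Document and the code as a UString, and
-- OverloadedStrings):
--
-- scoreTotals :: (DbConn m) => m [Document]
-- scoreTotals = rest =<< runMR mr
--    where
--       mr = (mapReduce "play" mapFn reduceFn) {rOut = Just "score_totals"}
--       mapFn    = Javascript [] "function () { emit(this.team, this.score); }"
--       reduceFn = Javascript [] "function (k, vs) { return Array.sum(vs); }"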
-- | runCommand1 foo = runCommand [foo =: 1]
runCommand1 :: (DbConn m) => UString -> m Document

-- | Run code on server
eval :: (DbConn m) => Javascript -> m Document
instance (Throw IOError m) => Throw IOError (Connected m)
instance Show MapReduce
instance Eq MapReduce
instance Show GroupKey
instance Eq GroupKey
instance Show Group
instance Eq Group
instance Show QueryOption
instance Eq QueryOption
instance Show Query
instance Eq Query
instance Show WriteMode
instance Eq WriteMode
instance Show Selection
instance Eq Selection
instance Show Failure
instance Eq Failure
instance (Monad m) => Context Connection (Connected m)
instance (Monad m) => Context MasterOrSlaveOk (Connected m)
instance (Monad m) => Context WriteMode (Connected m)
instance (Monad m) => Throw Failure (Connected m)
instance (MonadIO m) => MonadIO (Connected m)
instance (Monad m) => Monad (Connected m)
instance (Monad m) => Applicative (Connected m)
instance (Monad m) => Functor (Connected m)
instance (Conn m) => Resource m Cursor
instance Select Query
instance Select Selection
instance (Context Database m, Conn m) => DbConn m
instance Error Failure
instance (Context Connection m, Context MasterOrSlaveOk m, Context WriteMode m, Throw Failure m, Throw IOError m, MonadIO' m) => Conn m
instance MonadTrans Connected


-- | Database administrative functions
module Database.MongoDB.Admin
data CollectionOption
Capped :: CollectionOption
MaxByteSize :: Int -> CollectionOption
MaxItems :: Int -> CollectionOption

-- | Create collection with given options. You only need to call this to
-- set options, otherwise a collection is created automatically on first
-- use with no options.
createCollection :: (DbConn m) => [CollectionOption] -> Collection -> m Document

-- | Rename first collection to second collection
renameCollection :: (DbConn m) => Collection -> Collection -> m Document

-- | Delete the given collection! Return True if collection existed (and
-- was deleted); return False if collection did not exist (and no
-- action).
dropCollection :: (DbConn m) => Collection -> m Bool

-- | This operation takes a while
validateCollection :: (DbConn m) => Collection -> m Document
data Index
Index :: Collection -> Order -> IndexName -> Bool -> Bool -> Index
iColl :: Index -> Collection
iKey :: Index -> Order
iName :: Index -> IndexName
iUnique :: Index -> Bool
iDropDups :: Index -> Bool
type IndexName = UString

-- | Spec of index of ordered keys on collection. Name is generated from
-- keys. Unique and dropDups are False.
index :: Collection -> Order -> Index

-- | Create index if we did not already create one. May be called
-- repeatedly with practically no performance hit, because we remember if
-- we already called this for the same index (although this memory gets
-- wiped out every 15 minutes, in case another client drops the index and
-- we want to create it again).
ensureIndex :: (DbConn m) => Index -> m ()
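
-- For example, a hedged sketch of declaring a unique index (the collection
-- and field names are arbitrary; assumes OverloadedStrings):
--
-- ensureNameIndex :: (DbConn m) => m ()
-- ensureNameIndex = ensureIndex (index "team" ["name" =: 1]) {iUnique = True}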
-- | Create index on the server. This call goes to the server every time.
createIndex :: (DbConn m) => Index -> m ()

-- | Remove the index
dropIndex :: (DbConn m) => Collection -> IndexName -> m Document

-- | Get all indexes on this collection
getIndexes :: (DbConn m) => Collection -> m [Document]

-- | Drop all indexes on this collection
dropIndexes :: (DbConn m) => Collection -> m Document

-- | Fetch all users of this database
allUsers :: (DbConn m) => m [Document]

-- | Add user with password, with read-only access if the bool is True, or
-- read-write access if it is False
addUser :: (DbConn m) => Bool -> Username -> Password -> m ()
removeUser :: (DbConn m) => Username -> m ()

-- | Copy database from given host to the server I am connected to. Fails
-- and returns ok = 0 if we don't have permission to read
-- from the given server (use copyDatabase in this case).
cloneDatabase :: (Conn m) => Database -> Host -> m Document

-- | Copy database from given host to the server I am connected to. If
-- username and password are supplied, use them to read from the given
-- host.
copyDatabase :: (Conn m) => Database -> Host -> Maybe (Username, Password) -> Database -> m Document

-- | Delete the given database!
dropDatabase :: (Conn m) => Database -> m Document

-- | Attempt to fix any corrupt records. This operation takes a while.
repairDatabase :: (Conn m) => Database -> m Document
serverBuildInfo :: (Conn m) => m Document
serverVersion :: (Conn m) => m UString
collectionStats :: (DbConn m) => Collection -> m Document
dataSize :: (DbConn m) => Collection -> m Int
storageSize :: (DbConn m) => Collection -> m Int
totalIndexSize :: (DbConn m) => Collection -> m Int
totalSize :: (DbConn m) => Collection -> m Int
data ProfilingLevel
getProfilingLevel :: (DbConn m) => m ProfilingLevel
type MilliSec = Int
setProfilingLevel :: (DbConn m) => ProfilingLevel -> Maybe MilliSec -> m ()
dbStats :: (DbConn m) => m Document
type OpNum = Int

-- | See currently running operation on the database, if any
currentOp :: (DbConn m) => m (Maybe Document)
killOp :: (DbConn m) => OpNum -> m (Maybe Document)
serverStatus :: (Conn m) => m Document
instance Show ProfilingLevel
instance Enum ProfilingLevel
instance Eq ProfilingLevel
instance Show Index
instance Eq Index
instance Show CollectionOption
instance Eq CollectionOption


-- | Client interface to MongoDB server(s).
--
-- Simple example:
--
--
-- import Database.MongoDB
--
-- main = do
--    e <- runNet $ connect (host "127.0.0.1")
--    conn <- either (fail . show) return e
--    e <- runConn run conn
--    either (fail . show) return e
--
-- run = useDb "baseball" $ do
--    clearTeams
--    insertTeams
--    print' "All Teams" =<< allTeams
--    print' "National League Teams" =<< nationalLeagueTeams
--    print' "New York Teams" =<< newYorkTeams
--
-- clearTeams = delete (select [] "team")
--
-- insertTeams = insertMany "team" [
--    ["name" =: "Yankees", "home" =: ["city" =: "New York", "state" =: "NY"], "league" =: "American"],
--    ["name" =: "Mets", "home" =: ["city" =: "New York", "state" =: "NY"], "league" =: "National"],
--    ["name" =: "Phillies", "home" =: ["city" =: "Philadelphia", "state" =: "PA"], "league" =: "National"],
--    ["name" =: "Red Sox", "home" =: ["city" =: "Boston", "state" =: "MA"], "league" =: "American"] ]
--
-- allTeams = rest =<< find (select [] "team") {sort = ["city" =: 1]}
--
-- nationalLeagueTeams = rest =<< find (select ["league" =: "National"] "team")
--
-- newYorkTeams = rest =<< find (select ["home.state" =: "NY"] "team") {project = ["name" =: 1, "league" =: 1]}
--
-- print' title docs = liftIO $ putStrLn title >> mapM_ print docs
--
module Database.MongoDB