-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/
-- | Haskell distributed parallel Haskell
--
-- Haskell distributed parallel Haskell (HdpH) is a Haskell DSL for
-- distributed-memory parallelism, implemented entirely in Haskell (as
-- supported by GHC).
@package hdph
@version 0.0.1
module Control.Parallel.HdpH.Conf
-- | RTSConf is a record data type collecting a number of parameters
-- governing the behaviour of the HdpH runtime system.
data RTSConf
RTSConf :: Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> Int -> String -> RTSConf
-- | Debug level, a number defined in module
-- Control.Parallel.HdpH.Internal.Location. Default is 0
-- (corresponding to no debug output).
debugLvl :: RTSConf -> Int
-- | Number of concurrent schedulers per node. Must be positive and should
-- be <= the number of HECs (as set by GHC RTS option
-- -N). Default is 1.
scheds :: RTSConf -> Int
-- | Interval in microseconds to wake up sleeping schedulers (which is
-- necessary to recover from a race condition between concurrent
-- schedulers). Must be positive. Default is 1000 (corresponding to 1
-- millisecond).
wakeupDly :: RTSConf -> Int
-- | Number of hops a FISH message may travel before being considered
-- failed. Must be non-negative. Default is 7.
maxHops :: RTSConf -> Int
-- | Low sparkpool watermark for fishing. The RTS will send a FISH message
-- unless the spark pool holds more than maxFish sparks (or unless a
-- FISH is already outstanding). Must be non-negative; should be <
-- minSched. Default is 1.
maxFish :: RTSConf -> Int
-- | Low sparkpool watermark for scheduling. The RTS will respond to FISH
-- messages by SCHEDULEing sparks unless the spark pool holds fewer than
-- minSched sparks. Must be non-negative; should be >
-- maxFish. Default is 2.
minSched :: RTSConf -> Int
-- | After a failed FISH, minimal delay in microseconds before sending
-- another FISH message; the actual delay is chosen randomly between
-- minFishDly and maxFishDly. Must be non-negative; should
-- be <= maxFishDly. Default is 10000 (corresponding
-- to 10 milliseconds).
minFishDly :: RTSConf -> Int
-- | After a failed FISH, maximal delay in microseconds before sending
-- another FISH message; the actual delay is chosen randomly between
-- minFishDly and maxFishDly. Must be non-negative; should
-- be >= minFishDly. Default is 1000000 (corresponding
-- to 1 second).
maxFishDly :: RTSConf -> Int
-- | Number of nodes constituting the distributed runtime system. Must be
-- positive. Default is 1.
numProcs :: RTSConf -> Int
-- | Network interface, required to autodetect a node's IP address. The
-- string must be one of the interface names returned by the POSIX
-- command ifconfig. Default is eth0 (corresponding to
-- the first Ethernet interface).
networkInterface :: RTSConf -> String
-- | Default runtime system configuration parameters.
defaultRTSConf :: RTSConf
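--
-- A minimal sketch of overriding selected defaults via record update
-- syntax (the field values chosen here are illustrative only):
--
-- > myConf :: RTSConf
-- > myConf = defaultRTSConf { scheds = 4, numProcs = 8 }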
module Control.Parallel.HdpH
-- | Par is a type constructor of kind * -> * and an
-- instance of classes Functor and Monad. Par is
-- defined in terms of a parametric continuation monad ParM by
-- plugging in RTS, the state monad of the runtime system. Since
-- neither ParM nor RTS are exported, Par can be
-- considered abstract.
type Par = ParM RTS
-- | Eliminates the Par monad by executing the given parallel
-- computation p, including setting up and initialising a
-- distributed runtime system according to the configuration parameter
-- conf. This function lives in the IO monad because p
-- may be impure; for instance, p may exhibit non-determinism.
-- Caveat: Though the computation p will only be started on a
-- single root node, runParIO_ must be executed on every node of
-- the distributed runtime system due to the SPMD nature of HdpH. Note
-- that the configuration parameter conf applies to all nodes
-- uniformly; at present there is no support for heterogeneous
-- configurations.
runParIO_ :: RTSConf -> Par () -> IO ()
-- | Convenience: variant of runParIO_ which does return a result.
-- Caveat: The result is only returned on the root node; all other nodes
-- return Nothing.
runParIO :: RTSConf -> Par a -> IO (Maybe a)
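--
-- A minimal usage sketch, assuming all required Static declarations
-- have been registered beforehand (see declareStatic below):
--
-- > main :: IO ()
-- > main = runParIO_ defaultRTSConf (io (putStrLn "Hello from HdpH"))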
-- | Terminates the current thread.
done :: Par a
-- | Returns the node this operation is currently executed on.
myNode :: Par NodeId
-- | Returns a list of all nodes currently forming the distributed runtime
-- system.
allNodes :: Par [NodeId]
-- | Lifts an IO action into the Par monad.
io :: IO a -> Par a
-- | Evaluates its argument to weak head normal form.
eval :: a -> Par a
-- | Evaluates its argument to normal form (as defined by NFData
-- instance).
force :: NFData a => a -> Par a
-- | Creates a new thread, to be executed on the current node.
fork :: Par () -> Par ()
-- | Creates a spark, to be available for work stealing. The spark may be
-- converted into a thread and executed locally, or it may be stolen by
-- another node and executed there.
spark :: Closure (Par ()) -> Par ()
-- | Pushes a computation to the given node, where it is eagerly converted
-- into a thread and executed.
pushTo :: Closure (Par ()) -> NodeId -> Par ()
-- | Creates a new empty IVar.
new :: Par (IVar a)
-- | Writes to given IVar (without forcing the value written).
put :: IVar a -> a -> Par ()
-- | Reads from given IVar; blocks if the IVar is empty.
get :: IVar a -> Par a
-- | Reads from given IVar; does not block but returns Nothing if
-- IVar empty.
tryGet :: IVar a -> Par (Maybe a)
-- | Tests whether given IVar is empty or full; does not block.
probe :: IVar a -> Par Bool
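--
-- A minimal sketch of local communication through an IVar, using only
-- the operations documented above:
--
-- > localDemo :: Par Int
-- > localDemo = do
-- >   v <- new
-- >   fork (put v 42)
-- >   get v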
-- | Globalises given IVar, returning a globally unique handle; this
-- operation is restricted to IVars of Closure type.
glob :: IVar (Closure a) -> Par (GIVar (Closure a))
-- | Writes to (possibly remote) IVar denoted by given global handle; this
-- operation is restricted to writing values of Closure type.
rput :: GIVar (Closure a) -> Closure a -> Par ()
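--
-- A hypothetical sketch of retrieving a remote result via glob and
-- rput. Here mkClosure, toClosure and unClosure are assumed to come
-- from module Control.Parallel.HdpH.Closure, and the Static
-- declaration required for job is assumed to be registered:
--
-- > job :: GIVar (Closure Int) -> Par ()
-- > job gv = rput gv (toClosure (6 * 7))
-- >
-- > remoteDemo :: Par Int
-- > remoteDemo = do
-- >   v  <- new
-- >   gv <- glob v
-- >   spark $(mkClosure [| job gv |])
-- >   unClosure <$> get v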
-- | A NodeId identifies a node (that is, an OS process running
-- HdpH). A NodeId should be thought of as an abstract identifier
-- which instantiates the classes Eq, Ord, Show,
-- NFData and Serialize.
data NodeId
-- | An IVar is a write-once one place buffer. IVars are abstract; they can
-- be accessed and manipulated only by the operations put,
-- get, tryGet, probe and glob.
data IVar a
-- | A GIVar (short for global IVar) is a globally unique handle
-- referring to an IVar. Unlike IVars, GIVars can be compared and
-- serialised. They can also be written to remotely by the operation
-- rput.
data GIVar a
-- | Returns the node hosting the IVar referred to by the given GIVar. This
-- function being pure implies that IVars cannot migrate between nodes.
at :: GIVar a -> NodeId
-- | Static declaration of Static deserialisers used in explicit Closures
-- created or imported by this module. This Static declaration must be
-- imported by every main module using HdpH. The imported Static
-- declaration must be combined with the main module's own Static
-- declaration and registered; failure to do so may abort the program at
-- runtime.
declareStatic :: StaticDecl
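--
-- A sketch of combining and registering Static declarations in a main
-- module; register and the Monoid instance of StaticDecl are assumed
-- to come from module Control.Parallel.HdpH.Closure, and
-- myDeclareStatic and myMain are hypothetical application-specific
-- definitions:
--
-- > main :: IO ()
-- > main = do
-- >   register (Control.Parallel.HdpH.declareStatic <> myDeclareStatic)
-- >   runParIO_ defaultRTSConf myMain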
instance Eq NodeId
instance Ord NodeId
instance NFData NodeId
instance Serialize NodeId
instance Eq (GIVar a)
instance Ord (GIVar a)
instance NFData (GIVar a)
instance Serialize (GIVar a)
instance Show (GIVar a)
instance Show NodeId
module Control.Parallel.HdpH.Strategies
-- | A Strategy for type a is a (semantic)
-- identity in the Par monad. For an elaboration of this
-- concept (in the context of the Eval monad) see the paper:
-- Marlow et al. Seq no more: Better Strategies for parallel
-- Haskell. Haskell 2010.
type Strategy a = a -> Par a
-- | Strategy application is actual application (in the Par
-- monad).
using :: a -> Strategy a -> Par a
-- | Do Nothing strategy.
r0 :: Strategy a
-- | Evaluate head-strict strategy; probably not very useful in
-- HdpH.
rseq :: Strategy a
-- | Evaluate fully strategy.
rdeepseq :: NFData a => Strategy a
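--
-- A minimal sketch of strategy application: fully normalise a list
-- before consuming it.
--
-- > sumNF :: [Int] -> Par Int
-- > sumNF xs = do
-- >   xs' <- xs `using` rdeepseq
-- >   return (sum xs')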
-- | forceC is the fully forcing Closure strategy,
-- i.e. it fully normalises the thunk inside an explicit
-- Closure. Importantly, forceC alters the
-- serialisable Closure representation so that
-- serialisation will not force the Closure again.
forceC :: (NFData a, ToClosure a) => Strategy (Closure a)
-- | forceCC is a Closure wrapping the fully
-- forcing Closure strategy forceC; see the tutorial in
-- module Closure for details on the implementation of
-- forceCC.
forceCC :: ForceCC a => Closure (Strategy (Closure a))
-- | Indexing class, recording which types support forceCC;
-- see the tutorial in module Closure for a more thorough
-- explanation.
class (NFData a, ToClosure a) => ForceCC a
-- | Only method of class ForceCC, recording the source location
-- where an instance of ForceCC is declared.
locForceCC :: ForceCC a => LocT (Strategy (Closure a))
-- | Type synonym for declaring the Static deserialisers
-- required by ForceCC instances; see the tutorial in
-- module Closure for a more thorough explanation.
type StaticForceCC a = Static (Env -> Strategy (Closure a))
-- | Static deserialiser required by a ForceCC
-- instance; see the tutorial in module Closure for a more
-- thorough explanation.
staticForceCC :: ForceCC a => StaticForceCC a
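--
-- A hypothetical sketch of a ForceCC instance and its Static
-- declaration, following the pattern suggested by the Closure
-- tutorial; here and declare are assumed to come from module
-- Control.Parallel.HdpH.Closure, and the NFData and ToClosure
-- superclass instances for Int are assumed to exist:
--
-- > instance ForceCC Int where locForceCC = $(here)
-- >
-- > myDeclareStatic :: StaticDecl
-- > myDeclareStatic = declare (staticForceCC :: StaticForceCC Int)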
-- | A ProtoStrategy is almost a Strategy.
-- More precisely, a ProtoStrategy for type a is
-- a delayed (semantic) identity function in the
-- Par monad, i.e. it returns an IVar
-- (rather than a term) of type a.
type ProtoStrategy a = a -> Par (IVar a)
-- | sparkClosure clo_strat is a ProtoStrategy
-- that sparks a Closure; evaluation of the sparked
-- Closure is governed by the strategy
-- unClosure clo_strat.
sparkClosure :: Closure (Strategy (Closure a)) -> ProtoStrategy (Closure a)
-- | pushClosure clo_strat n is a ProtoStrategy
-- that pushes a Closure to be executed in a new thread
-- on node n; evaluation of the pushed Closure
-- is governed by the strategy unClosure clo_strat.
pushClosure :: Closure (Strategy (Closure a)) -> NodeId -> ProtoStrategy (Closure a)
-- | Evaluate each element of a list according to the given strategy.
evalList :: Strategy a -> Strategy [a]
-- | Specialisation of evalList to a list of Closures
-- (wrapped in a Closure). Useful for building clustering strategies.
evalClosureListClosure :: Strategy (Closure a) -> Strategy (Closure [Closure a])
-- | Evaluate each element of a list of Closures in parallel according to
-- the given strategy (wrapped in a Closure). Work is distributed by lazy
-- work stealing.
parClosureList :: Closure (Strategy (Closure a)) -> Strategy [Closure a]
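--
-- A minimal sketch combining parClosureList with the fully forcing
-- strategy forceCC:
--
-- > forceAll :: ForceCC a => [Closure a] -> Par [Closure a]
-- > forceAll clos = clos `using` parClosureList forceCC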
-- | Evaluate each element of a list of Closures in parallel according to
-- the given strategy (wrapped in a Closure). Work is pushed round-robin
-- to the given list of nodes.
pushClosureList :: Closure (Strategy (Closure a)) -> [NodeId] -> Strategy [Closure a]
-- | Evaluate each element of a list of Closures in parallel according to
-- the given strategy (wrapped in a Closure). Work is pushed randomly to
-- the given list of nodes.
pushRandClosureList :: Closure (Strategy (Closure a)) -> [NodeId] -> Strategy [Closure a]
-- | parClosureListClusterBy cluster uncluster is a generic
-- parallel clustering strategy combinator for lists of Closures,
-- evaluating clusters generated by cluster in parallel.
-- Clusters are distributed by lazy work stealing. The function
-- uncluster must be a left inverse of cluster,
-- that is uncluster . cluster must be the identity.
parClosureListClusterBy :: ([Closure a] -> [[Closure a]]) -> ([[Closure a]] -> [Closure a]) -> Closure (Strategy (Closure a)) -> Strategy [Closure a]
-- | parClosureListChunked n evaluates chunks of size n
-- of a list of Closures in parallel according to the given strategy
-- (wrapped in a Closure). Chunks are distributed by lazy work stealing.
-- For instance, dividing the list [c1,c2,c3,c4,c5] into chunks
-- of size 3 results in the following list of chunks [[c1,c2,c3],
-- [c4,c5]].
parClosureListChunked :: Int -> Closure (Strategy (Closure a)) -> Strategy [Closure a]
-- | parClosureListSliced n evaluates n slices of a list
-- of Closures in parallel according to the given strategy (wrapped in a
-- Closure). Slices are distributed by lazy work stealing. For instance,
-- dividing the list [c1,c2,c3,c4,c5] into 3 slices results in
-- the following list of slices [[c1,c4], [c2,c5], [c3]].
parClosureListSliced :: Int -> Closure (Strategy (Closure a)) -> Strategy [Closure a]
-- | Task farm, evaluates tasks (function Closure applied to an element of
-- the input list) in parallel and according to the given strategy
-- (wrapped in a Closure). Note that parMap should only be used
-- if the terms in the input list are already in normal form, as they may
-- be forced sequentially otherwise.
parMap :: ToClosure a => Closure (Strategy (Closure b)) -> Closure (a -> b) -> [a] -> Par [b]
-- | Specialisation of parMap to the fully forcing Closure
-- strategy. That is, parMapNF forces every element of the
-- output list to normal form.
parMapNF :: (ToClosure a, ForceCC b) => Closure (a -> b) -> [a] -> Par [b]
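--
-- A hypothetical sketch of a parallel map squaring a list of integers;
-- mkClosure is assumed to come from module Control.Parallel.HdpH.Closure,
-- and the Static declarations plus the ToClosure and ForceCC instances
-- for Int are assumed to be registered:
--
-- > square :: Int -> Int
-- > square x = x * x
-- >
-- > squares :: [Int] -> Par [Int]
-- > squares = parMapNF $(mkClosure [| square |])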
-- | Chunking task farm, divides the input list into chunks of given size
-- and evaluates tasks (function Closure mapped on a chunk of the input
-- list) in parallel and according to the given strategy (wrapped in a
-- Closure). parMapChunked should only be used if the terms in
-- the input list are already in normal form.
parMapChunked :: ToClosure a => Int -> Closure (Strategy (Closure b)) -> Closure (a -> b) -> [a] -> Par [b]
-- | Specialisation of parMapChunked to the fully forcing
-- Closure strategy.
parMapChunkedNF :: (ToClosure a, ForceCC b) => Int -> Closure (a -> b) -> [a] -> Par [b]
-- | Slicing task farm, divides the input list into given number of slices
-- and evaluates tasks (function Closure mapped on a slice of the input
-- list) in parallel and according to the given strategy (wrapped in a
-- Closure). parMapSliced should only be used if the terms in
-- the input list are already in normal form.
parMapSliced :: ToClosure a => Int -> Closure (Strategy (Closure b)) -> Closure (a -> b) -> [a] -> Par [b]
-- | Specialisation of parMapSliced to the fully forcing
-- Closure strategy.
parMapSlicedNF :: (ToClosure a, ForceCC b) => Int -> Closure (a -> b) -> [a] -> Par [b]
-- | Monadic task farm for Closures, evaluates tasks
-- (Par-monadic function Closure applied to each Closure in
-- the input list) in parallel. Note the absence of a strategy argument;
-- strategies aren't needed because they can be baked into the monadic
-- function Closure.
parClosureMapM :: Closure (Closure a -> Par (Closure b)) -> [Closure a] -> Par [Closure b]
-- | Monadic task farm, evaluates tasks (Par-monadic
-- function Closure applied to an element of the input list) in parallel.
-- Note the absence of a strategy argument; strategies aren't needed
-- because they can be baked into the monadic function Closure.
-- parMapM should only be used if the terms in the input list are
-- already in normal form, as they may be forced sequentially otherwise.
parMapM :: ToClosure a => Closure (a -> Par (Closure b)) -> [a] -> Par [b]
-- | Specialisation of parMapM, not returning any result.
parMapM_ :: ToClosure a => Closure (a -> Par b) -> [a] -> Par ()
-- | Task farm like parMap but pushes tasks in a
-- round-robin fashion to the given list of nodes.
pushMap :: ToClosure a => Closure (Strategy (Closure b)) -> [NodeId] -> Closure (a -> b) -> [a] -> Par [b]
-- | Task farm like parMapNF but pushes tasks in a
-- round-robin fashion to the given list of nodes.
pushMapNF :: (ToClosure a, ForceCC b) => [NodeId] -> Closure (a -> b) -> [a] -> Par [b]
-- | Monadic task farm for Closures like parClosureMapM but
-- pushes tasks in a round-robin fashion to the given list of nodes.
pushClosureMapM :: [NodeId] -> Closure (Closure a -> Par (Closure b)) -> [Closure a] -> Par [Closure b]
-- | Monadic task farm like parMapM but pushes tasks in a
-- round-robin fashion to the given list of nodes.
pushMapM :: ToClosure a => [NodeId] -> Closure (a -> Par (Closure b)) -> [a] -> Par [b]
-- | Monadic task farm like parMapM_ but pushes tasks in a
-- round-robin fashion to the given list of nodes.
pushMapM_ :: ToClosure a => [NodeId] -> Closure (a -> Par b) -> [a] -> Par ()
-- | Monadic task farm for Closures like parClosureMapM but
-- pushes to random nodes on the given list.
pushRandClosureMapM :: [NodeId] -> Closure (Closure a -> Par (Closure b)) -> [Closure a] -> Par [Closure b]
-- | Monadic task farm like parMapM but pushes to random
-- nodes on the given list.
pushRandMapM :: ToClosure a => [NodeId] -> Closure (a -> Par (Closure b)) -> [a] -> Par [b]
-- | Monadic task farm like parMapM_ but pushes to random
-- nodes on the given list.
pushRandMapM_ :: ToClosure a => [NodeId] -> Closure (a -> Par b) -> [a] -> Par ()
-- | Sequential divide-and-conquer skeleton. divideAndConquer trivial
-- decompose combine f x repeatedly decomposes the problem
-- x until trivial, applies f to the trivial
-- sub-problems and combines the solutions.
divideAndConquer :: (a -> Bool) -> (a -> [a]) -> (a -> [b] -> b) -> (a -> b) -> a -> b
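--
-- An illustrative instantiation of the sequential skeleton: naive
-- Fibonacci.
--
-- > fib :: Int -> Int
-- > fib = divideAndConquer
-- >         (\ n -> n < 2)          -- problem trivial?
-- >         (\ n -> [n - 1, n - 2]) -- decompose
-- >         (\ _ [a, b] -> a + b)   -- combine sub-solutions
-- >         id                      -- solve trivial problem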
-- | Parallel divide-and-conquer skeleton with lazy work distribution.
-- parDivideAndConquer trivial_clo decompose_clo combine_clo f_clo
-- x follows the divide-and-conquer pattern of
-- divideAndConquer except that, for technical reasons,
-- all arguments are Closures.
parDivideAndConquer :: Closure (Closure a -> Bool) -> Closure (Closure a -> [Closure a]) -> Closure (Closure a -> [Closure b] -> Closure b) -> Closure (Closure a -> Par (Closure b)) -> Closure a -> Par (Closure b)
-- | Parallel divide-and-conquer skeleton with eager random work
-- distribution, pushing work to the given list of nodes.
-- pushDivideAndConquer nodes trivial_clo decompose_clo combine_clo
-- f_clo x follows the divide-and-conquer pattern of
-- divideAndConquer except that, for technical reasons,
-- all arguments are Closures.
pushDivideAndConquer :: [NodeId] -> Closure (Closure a -> Bool) -> Closure (Closure a -> [Closure a]) -> Closure (Closure a -> [Closure b] -> Closure b) -> Closure (Closure a -> Par (Closure b)) -> Closure a -> Par (Closure b)
declareStatic :: StaticDecl
instance ForceCC (Closure a)
instance ToClosure [Closure a]