| Safe Haskell | None |
|---|---|
| Language | Haskell2010 |
Synopsis
- data KernelConstants = KernelConstants {
- kernelGlobalThreadId :: TExp Int32
- kernelLocalThreadId :: TExp Int32
- kernelGroupId :: TExp Int32
- kernelGlobalThreadIdVar :: VName
- kernelLocalThreadIdVar :: VName
- kernelGroupIdVar :: VName
- kernelNumGroupsCount :: Count NumGroups SubExp
- kernelGroupSizeCount :: Count GroupSize SubExp
- kernelNumGroups :: TExp Int64
- kernelGroupSize :: TExp Int64
- kernelNumThreads :: TExp Int32
- kernelWaveSize :: TExp Int32
- kernelThreadActive :: TExp Bool
- kernelLocalIdMap :: Map [SubExp] [TExp Int32]
- kernelChunkItersMap :: Map [SubExp] (TExp Int32)
- keyWithEntryPoint :: Maybe Name -> Name -> Name
- type CallKernelGen = ImpM GPUMem HostEnv HostOp
- type InKernelGen = ImpM GPUMem KernelEnv KernelOp
- data Locks = Locks {
- locksArray :: VName
- locksCount :: Int
- data HostEnv = HostEnv {}
- data Target
- data KernelEnv = KernelEnv {}
- computeThreadChunkSize :: SplitOrdering -> TExp Int64 -> Count Elements (TExp Int64) -> Count Elements (TExp Int64) -> TV Int64 -> ImpM rep r op ()
- groupReduce :: TExp Int32 -> Lambda GPUMem -> [VName] -> InKernelGen ()
- groupScan :: Maybe (TExp Int32 -> TExp Int32 -> TExp Bool) -> TExp Int64 -> TExp Int64 -> Lambda GPUMem -> [VName] -> InKernelGen ()
- isActive :: [(VName, SubExp)] -> TExp Bool
- sKernelThread :: String -> VName -> KernelAttrs -> InKernelGen () -> CallKernelGen ()
- sKernelGroup :: String -> VName -> KernelAttrs -> InKernelGen () -> CallKernelGen ()
- data KernelAttrs = KernelAttrs {}
- defKernelAttrs :: Count NumGroups SubExp -> Count GroupSize SubExp -> KernelAttrs
- sReplicate :: VName -> SubExp -> CallKernelGen ()
- sIota :: VName -> TExp Int64 -> Exp -> Exp -> IntType -> CallKernelGen ()
- sCopy :: CopyCompiler GPUMem HostEnv HostOp
- compileThreadResult :: SegSpace -> PatElem LetDecMem -> KernelResult -> InKernelGen ()
- compileGroupResult :: SegSpace -> PatElem LetDecMem -> KernelResult -> InKernelGen ()
- virtualiseGroups :: SegVirt -> TExp Int32 -> (TExp Int32 -> InKernelGen ()) -> InKernelGen ()
- kernelLoop :: IntExp t => TExp t -> TExp t -> TExp t -> (TExp t -> InKernelGen ()) -> InKernelGen ()
- groupCoverSpace :: IntExp t => [TExp t] -> ([TExp t] -> InKernelGen ()) -> InKernelGen ()
- data Precomputed
- precomputeConstants :: Count GroupSize (TExp Int64) -> Stms GPUMem -> CallKernelGen Precomputed
- precomputedConstants :: Precomputed -> InKernelGen a -> InKernelGen a
- atomicUpdateLocking :: AtomicBinOp -> Lambda GPUMem -> AtomicUpdate GPUMem KernelEnv
- type AtomicBinOp = BinOp -> Maybe (VName -> VName -> Count Elements (TExp Int64) -> Exp -> AtomicOp)
- data Locking = Locking {
- lockingArray :: VName
- lockingIsUnlocked :: TExp Int32
- lockingToLock :: TExp Int32
- lockingToUnlock :: TExp Int32
- lockingMapping :: [TExp Int64] -> [TExp Int64]
- data AtomicUpdate rep r
- = AtomicPrim (DoAtomicUpdate rep r)
- | AtomicCAS (DoAtomicUpdate rep r)
- | AtomicLocking (Locking -> DoAtomicUpdate rep r)
- type DoAtomicUpdate rep r = Space -> [VName] -> [TExp Int64] -> ImpM rep r KernelOp ()
Documentation
data KernelConstants Source #
Constructors

KernelConstants
data Locks Source #

Information about the locks available for accumulators.

Constructors

Locks
data HostEnv Source #

Constructors

HostEnv
data Target Source #

Which target are we ultimately generating code for? While most of the kernel code is the same, there are some cases where we generate special code based on the ultimate low-level API we are targeting.
computeThreadChunkSize :: SplitOrdering -> TExp Int64 -> Count Elements (TExp Int64) -> Count Elements (TExp Int64) -> TV Int64 -> ImpM rep r op () Source #
groupReduce :: TExp Int32 -> Lambda GPUMem -> [VName] -> InKernelGen () Source #
groupScan :: Maybe (TExp Int32 -> TExp Int32 -> TExp Bool) -> TExp Int64 -> TExp Int64 -> Lambda GPUMem -> [VName] -> InKernelGen () Source #
sKernelThread :: String -> VName -> KernelAttrs -> InKernelGen () -> CallKernelGen () Source #
sKernelGroup :: String -> VName -> KernelAttrs -> InKernelGen () -> CallKernelGen () Source #
data KernelAttrs Source #
Various extra configuration of the kernel being generated.
KernelAttrs | |
|
defKernelAttrs :: Count NumGroups SubExp -> Count GroupSize SubExp -> KernelAttrs Source #
The default kernel attributes.
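To illustrate how the attributes are consumed, the sketch below pairs defKernelAttrs with sKernelThread. It is not taken from the library itself: the kernel name, the empty body, and the assumption that the group count, group size, and thread-id VName have already been produced on the host side are all illustrative, and the relevant Futhark compiler modules are assumed to be in scope.

```haskell
-- Hedged sketch: 'numGroups', 'groupSize', and 'gtid' are assumed to have
-- been computed or allocated earlier on the host side; the body is a
-- placeholder for real thread-level code generation.
exampleKernel ::
  Count NumGroups SubExp ->
  Count GroupSize SubExp ->
  VName ->
  CallKernelGen ()
exampleKernel numGroups groupSize gtid =
  sKernelThread "example_kernel" gtid (defKernelAttrs numGroups groupSize) $
    -- Thread-level code (InKernelGen) would be generated here.
    pure ()
```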
sReplicate :: VName -> SubExp -> CallKernelGen () Source #
Perform a Replicate with a kernel.
sIota :: VName -> TExp Int64 -> Exp -> Exp -> IntType -> CallKernelGen () Source #
Perform an Iota with a kernel.
compileThreadResult :: SegSpace -> PatElem LetDecMem -> KernelResult -> InKernelGen () Source #
compileGroupResult :: SegSpace -> PatElem LetDecMem -> KernelResult -> InKernelGen () Source #
virtualiseGroups :: SegVirt -> TExp Int32 -> (TExp Int32 -> InKernelGen ()) -> InKernelGen () Source #
For many kernels, we may not have enough physical groups to cover the logical iteration space. Some groups thus have to perform double duty; we put an outer loop to accomplish this. The advantage over just launching a bazillion threads is that the cost of memory expansion should be proportional to the number of *physical* threads (hardware parallelism), not the amount of application parallelism.
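A hedged usage sketch follows. It assumes it runs inside a kernel body, that the number of logical groups was computed beforehand, and that the nullary SegVirt constructor requests virtualisation; the placeholder body stands in for real code generation.

```haskell
-- Sketch only: 'numLogicalGroups' is assumed to be available as a TExp Int32,
-- and 'SegVirt' is assumed to be the constructor that enables virtualisation.
virtualisedBody :: TExp Int32 -> InKernelGen ()
virtualisedBody numLogicalGroups =
  virtualiseGroups SegVirt numLogicalGroups $ \logicalGroupId ->
    -- 'logicalGroupId' plays the role of the group id; a single physical
    -- group may execute this body for several logical group indices.
    pure ()
```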
kernelLoop :: IntExp t => TExp t -> TExp t -> TExp t -> (TExp t -> InKernelGen ()) -> InKernelGen () Source #
Assign iterations of a for-loop to all threads in the kernel. The passed-in function is invoked with the (symbolic) iteration. The body must contain thread-level code. For multidimensional loops, use groupCoverSpace.
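The sketch below shows one plausible call, with the caveat that the argument order (thread index, total thread count, iteration count) is an assumption inferred from the documentation rather than confirmed; the KernelConstants accessors are those listed in the synopsis, and the constants value is presumed to come from the kernel environment.

```haskell
-- Hedged sketch: the argument order is an assumption (thread id, thread
-- count, number of iterations); 'constants' is presumed to be obtained from
-- the KernelEnv of the enclosing kernel.
distributedLoop :: KernelConstants -> TExp Int32 -> InKernelGen ()
distributedLoop constants n =
  kernelLoop (kernelGlobalThreadId constants) (kernelNumThreads constants) n $ \i ->
    -- Thread-level code for iteration 'i' would go here.
    pure ()
```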
groupCoverSpace :: IntExp t => [TExp t] -> ([TExp t] -> InKernelGen ()) -> InKernelGen () Source #
Iterate collectively through a multidimensional space, such that all threads in the group participate. The passed-in function is invoked with a (symbolic) point in the index space.
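A hedged two-dimensional example: the rows and cols extents are assumed to be in scope, and the placeholder body stands in for whatever per-point code a real kernel would generate.

```haskell
-- Sketch only: cover a rows x cols space with all threads of the group.
coverMatrix :: TExp Int64 -> TExp Int64 -> InKernelGen ()
coverMatrix rows cols =
  groupCoverSpace [rows, cols] $ \[i, j] ->
    -- Every (i, j) point in the space is handled by some thread in the
    -- group, with looping inserted when the space exceeds the group size.
    pure ()
```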
data Precomputed Source #
Various useful precomputed information.
precomputeConstants :: Count GroupSize (TExp Int64) -> Stms GPUMem -> CallKernelGen Precomputed Source #
Precompute various constants and useful information.
precomputedConstants :: Precomputed -> InKernelGen a -> InKernelGen a Source #
Make use of various precomputed constants.
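The two functions are intended to be used as a pair: precomputeConstants on the host side, precomputedConstants around the kernel body. The sketch below shows that pairing; the kernel name, and the assumption that the attributes, a thread-id VName, the statements, and a body compiler are already available, are all illustrative.

```haskell
-- Hedged sketch of the host/kernel pairing; all arguments are assumed to have
-- been produced elsewhere, and 'compileBody' is a hypothetical stand-in for
-- whatever actually compiles the statements inside the kernel.
precomputedKernel ::
  KernelAttrs ->
  Count GroupSize (TExp Int64) ->
  VName ->
  Stms GPUMem ->
  InKernelGen () ->
  CallKernelGen ()
precomputedKernel attrs groupSize gtid stms compileBody = do
  -- Host side: analyse the statements once and compute reusable constants.
  pre <- precomputeConstants groupSize stms
  -- Kernel side: bring the precomputed constants into scope around the body.
  sKernelThread "precomputed_kernel" gtid attrs $
    precomputedConstants pre compileBody
```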
atomicUpdateLocking :: AtomicBinOp -> Lambda GPUMem -> AtomicUpdate GPUMem KernelEnv Source #
Do an atomic update corresponding to a binary operator lambda.
type AtomicBinOp = BinOp -> Maybe (VName -> VName -> Count Elements (TExp Int64) -> Exp -> AtomicOp) Source #
data Locking Source #

Locking strategy used for an atomic update.

Constructors

Locking
data AtomicUpdate rep r Source #
The mechanism that will be used for performing the atomic update. Approximates how efficient it will be. Ordered from most to least efficient.
Constructors

| Constructor | Description |
|---|---|
| AtomicPrim (DoAtomicUpdate rep r) | Supported directly by primitive. |
| AtomicCAS (DoAtomicUpdate rep r) | Can be done by efficient swaps. |
| AtomicLocking (Locking -> DoAtomicUpdate rep r) | Requires explicit locking. |
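A hedged sketch of how a caller might dispatch on the three constructors, using only the signatures given above; the Locking value and the remaining arguments are assumed to be constructed elsewhere.

```haskell
-- Sketch only: dispatch on the mechanism chosen by atomicUpdateLocking.
-- 'locking' is assumed to have been set up beforehand (locks array plus
-- lock/unlock values), and 'space', 'arrs', 'bucket' describe the update.
doAtomicUpdate ::
  AtomicBinOp ->
  Lambda GPUMem ->
  Locking ->
  Space ->
  [VName] ->
  [TExp Int64] ->
  InKernelGen ()
doAtomicUpdate atomics lam locking space arrs bucket =
  case atomicUpdateLocking atomics lam of
    AtomicPrim f    -> f space arrs bucket         -- direct hardware atomic
    AtomicCAS f     -> f space arrs bucket         -- compare-and-swap loop
    AtomicLocking f -> f locking space arrs bucket -- explicit lock fallback
```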