-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/
-- | Bytestring builder with zero intermediate allocation
--
-- Please see README.md.
@package bytezap
@version 1.3.1
module Bytezap
module Bytezap.Common.Generic
data PlusSym f
data PlusSym1 l r
-- | Generic type foldMap using the addition monoid.
type GTFoldMapCAddition f gf = GTFoldMapC PlusSym 0 f gf
-- | Missing byteSwap functions for signed integers.
--
-- We have them for unsigned integers, but not for signed. They should
-- probably be provided, so I'm considering this a compatibility module
-- for the future when we have them.
module Raehik.Compat.Data.Int.ByteSwap
byteSwapI16 :: Int16 -> Int16
byteSwapI32 :: Int32 -> Int32
byteSwapI64 :: Int64 -> Int64
byteSwapI :: Int -> Int
-- | Missing byteSwap functions for unsigned integers.
--
-- Don't know why this one is missing.
module Raehik.Compat.Data.Word.ByteSwap
byteSwap :: Word -> Word
module Raehik.Compat.GHC.Exts.GHC908MemcpyPrimops
copyAddrToAddrNonOverlapping# :: Addr# -> Addr# -> Int# -> State# RealWorld -> State# RealWorld
setAddrRange# :: Addr# -> Int# -> Int# -> State# RealWorld -> State# RealWorld
module Raehik.Compat.GHC.Exts.GHC910UnalignedAddrPrimops
indexWord8OffAddrAsWord16# :: Addr# -> Int# -> Word16#
indexWord8OffAddrAsWord32# :: Addr# -> Int# -> Word32#
indexWord8OffAddrAsWord64# :: Addr# -> Int# -> Word64#
indexWord8OffAddrAsWord# :: Addr# -> Int# -> Word#
indexWord8OffAddrAsInt16# :: Addr# -> Int# -> Int16#
indexWord8OffAddrAsInt32# :: Addr# -> Int# -> Int32#
indexWord8OffAddrAsInt64# :: Addr# -> Int# -> Int64#
indexWord8OffAddrAsInt# :: Addr# -> Int# -> Int#
readWord8OffAddrAsWord16# :: Addr# -> Int# -> State# d -> (# State# d, Word16# #)
readWord8OffAddrAsWord32# :: Addr# -> Int# -> State# d -> (# State# d, Word32# #)
readWord8OffAddrAsWord64# :: Addr# -> Int# -> State# d -> (# State# d, Word64# #)
readWord8OffAddrAsWord# :: Addr# -> Int# -> State# d -> (# State# d, Word# #)
readWord8OffAddrAsInt16# :: Addr# -> Int# -> State# d -> (# State# d, Int16# #)
readWord8OffAddrAsInt32# :: Addr# -> Int# -> State# d -> (# State# d, Int32# #)
readWord8OffAddrAsInt64# :: Addr# -> Int# -> State# d -> (# State# d, Int64# #)
readWord8OffAddrAsInt# :: Addr# -> Int# -> State# d -> (# State# d, Int# #)
writeWord8OffAddrAsWord16# :: Addr# -> Int# -> Word16# -> State# d -> State# d
writeWord8OffAddrAsWord32# :: Addr# -> Int# -> Word32# -> State# d -> State# d
writeWord8OffAddrAsWord64# :: Addr# -> Int# -> Word64# -> State# d -> State# d
writeWord8OffAddrAsWord# :: Addr# -> Int# -> Word# -> State# d -> State# d
writeWord8OffAddrAsInt16# :: Addr# -> Int# -> Int16# -> State# d -> State# d
writeWord8OffAddrAsInt32# :: Addr# -> Int# -> Int32# -> State# d -> State# d
writeWord8OffAddrAsInt64# :: Addr# -> Int# -> Int64# -> State# d -> State# d
writeWord8OffAddrAsInt# :: Addr# -> Int# -> Int# -> State# d -> State# d
module Raehik.Compat.Data.Primitive.Types
-- | Prim extension class providing unaligned accesses
--
-- hoping to get this merged in
-- https://github.com/haskell/primitive/issues/409
--
-- (also includes Addr# primops which that issue/PR may not)
--
-- Also includes an associated type for size in bytes. Another thing that
-- maybe primitive could provide. (Wouldn't be hard!)
class Prim a => Prim' a where {
type SizeOf a :: Natural;
}
-- | Read a value from the array. The offset is in bytes.
indexWord8ByteArrayAs# :: Prim' a => ByteArray# -> Int# -> a
readWord8ByteArrayAs# :: Prim' a => MutableByteArray# s -> Int# -> State# s -> (# State# s, a #)
writeWord8ByteArrayAs# :: Prim' a => MutableByteArray# s -> Int# -> a -> State# s -> State# s
indexWord8OffAddrAs# :: Prim' a => Addr# -> Int# -> a
readWord8OffAddrAs# :: Prim' a => Addr# -> Int# -> State# s -> (# State# s, a #)
writeWord8OffAddrAs# :: Prim' a => Addr# -> Int# -> a -> State# s -> State# s
-- | Class of types supporting primitive array operations. This includes
-- interfacing with GC-managed memory (functions suffixed with
-- ByteArray#) and interfacing with unmanaged memory (functions
-- suffixed with Addr#). Endianness is platform-dependent.
class () => Prim a
-- | Size of values of type a. The argument is not used.
sizeOf# :: Prim a => a -> Int#
-- | Alignment of values of type a. The argument is not used.
alignment# :: Prim a => a -> Int#
-- | Read a value from the array. The offset is in elements of type
-- a rather than in bytes.
indexByteArray# :: Prim a => ByteArray# -> Int# -> a
-- | Read a value from the mutable array. The offset is in elements of type
-- a rather than in bytes.
readByteArray# :: Prim a => MutableByteArray# s -> Int# -> State# s -> (# State# s, a #)
-- | Write a value to the mutable array. The offset is in elements of type
-- a rather than in bytes.
writeByteArray# :: Prim a => MutableByteArray# s -> Int# -> a -> State# s -> State# s
-- | Fill a slice of the mutable array with a value. The offset and length
-- of the chunk are in elements of type a rather than in bytes.
setByteArray# :: Prim a => MutableByteArray# s -> Int# -> Int# -> a -> State# s -> State# s
-- | Read a value from a memory position given by an address and an offset.
-- The memory block the address refers to must be immutable. The offset
-- is in elements of type a rather than in bytes.
indexOffAddr# :: Prim a => Addr# -> Int# -> a
-- | Read a value from a memory position given by an address and an offset.
-- The offset is in elements of type a rather than in bytes.
readOffAddr# :: Prim a => Addr# -> Int# -> State# s -> (# State# s, a #)
-- | Write a value to a memory position given by an address and an offset.
-- The offset is in elements of type a rather than in bytes.
writeOffAddr# :: Prim a => Addr# -> Int# -> a -> State# s -> State# s
-- | Fill a memory block given by an address, an offset and a length. The
-- offset and length are in elements of type a rather than in
-- bytes.
setOffAddr# :: Prim a => Addr# -> Int# -> Int# -> a -> State# s -> State# s
-- | Size of values of type a. The argument is not used.
--
-- This function has existed since 0.1, but was moved from
-- Primitive to Types in version 0.6.3.0.
sizeOf :: Prim a => a -> Int
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Word.Word8
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Word.Word16
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Word.Word32
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Word.Word64
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Int.Int8
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Int.Int16
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Int.Int32
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Int.Int64
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Types.Word
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Types.Int
-- | I think this should be in primitive.
module Raehik.Compat.Data.Primitive.Types.Endian
-- | Boxed types which permit reversing byte order ("byte swapping").
class ByteSwap a
byteSwap :: ByteSwap a => a -> a
newtype ByteOrdered (end :: ByteOrder) a
ByteOrdered :: a -> ByteOrdered (end :: ByteOrder) a
[unByteOrdered] :: ByteOrdered (end :: ByteOrder) a -> a
-- | Newtype for easier instance derivation.
newtype PrimByteSwapped a
PrimByteSwapped :: a -> PrimByteSwapped a
[unPrimByteSwapped] :: PrimByteSwapped a -> a
instance GHC.Real.Integral a => GHC.Real.Integral (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance GHC.Enum.Enum a => GHC.Enum.Enum (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance GHC.Real.Real a => GHC.Real.Real (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance GHC.Num.Num a => GHC.Num.Num (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance GHC.Show.Show a => GHC.Show.Show (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance GHC.Classes.Eq a => GHC.Classes.Eq (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance GHC.Classes.Ord a => GHC.Classes.Ord (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance Data.Primitive.Types.Prim a => Data.Primitive.Types.Prim (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered 'GHC.ByteOrder.LittleEndian a)
instance Raehik.Compat.Data.Primitive.Types.Prim' a => Raehik.Compat.Data.Primitive.Types.Prim' (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered 'GHC.ByteOrder.LittleEndian a)
instance (Data.Primitive.Types.Prim a, Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap a) => Data.Primitive.Types.Prim (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered 'GHC.ByteOrder.BigEndian a)
instance (Raehik.Compat.Data.Primitive.Types.Prim' a, Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap a) => Raehik.Compat.Data.Primitive.Types.Prim' (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered 'GHC.ByteOrder.BigEndian a)
instance (Data.Primitive.Types.Prim a, Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap a) => Data.Primitive.Types.Prim (Raehik.Compat.Data.Primitive.Types.Endian.PrimByteSwapped a)
instance (Raehik.Compat.Data.Primitive.Types.Prim' a, Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap a) => Raehik.Compat.Data.Primitive.Types.Prim' (Raehik.Compat.Data.Primitive.Types.Endian.PrimByteSwapped a)
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Word.Word16
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Word.Word32
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Word.Word64
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Types.Word
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Int.Int16
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Int.Int32
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Int.Int64
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Types.Int
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Types.Float
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Types.Double
-- | Struct serializer: serialize fields of known length.
--
-- In Haskell-ish terminology, one may consider a C struct to be a
-- product type where each field is of known length. Thus, fields may be
-- accessed by a fixed offset from the struct start. This is convenient
-- for efficient access, since those offsets may be turned into
-- immediates on a register in a MOV instruction.
--
-- Given a struct-like type, we don't need to track "bytes serialized so
-- far" like the general case. We can serialize fields in any order we
-- like, since we know where they will sit in the resulting bytestring.
--
-- This module provides a serializer specifically for these struct-like
-- types. Maybe GHC can write more efficient code for these super-simple
-- types! I have no idea. So I'm trying it, and will compare performance.
--
-- Notably, this serializer is much less flexible. No monoid! I don't
-- really expect anyone to write manual stuff with it -- you should just
-- use the generics. That reminds me, TODO could easily provide some TH
-- too, and again compare.
module Bytezap.Struct
-- | A struct poker: base address (constant), byte offset, state token.
--
-- We could combine base address and byte offset, but we're aiming for
-- code that stores the address in a register and uses immediates to
-- access fields (like a good C compiler will do for its structs). So by
-- keeping them separate, I'm hoping that we can nudge GHC towards such
-- behaviour.
type Poke# s = Addr# -> Int# -> State# s -> State# s
-- | Poke newtype wrapper.
newtype Poke s
Poke :: Poke# s -> Poke s
[unPoke] :: Poke s -> Poke# s
-- | Execute a Poke at a fresh ByteString of the given
-- length.
unsafeRunPokeBS :: Int -> Poke RealWorld -> ByteString
-- | Execute a Poke at a pointer. Returns the number of bytes
-- written.
--
-- The pointer must be a mutable buffer with enough space to hold the
-- poke. Absolutely none of this is checked. Use with caution. Sensible
-- uses:
--
--
-- - implementing pokes to ByteStrings and the like
-- - executing known-length (!!) pokes to known-length (!!) buffers
-- e.g. together with allocaBytes
--
unsafeRunPoke :: MonadPrim s m => Poke s -> Ptr Word8 -> m ()
-- | Poke a type via its Prim' instance.
prim :: forall a s. Prim' a => a -> Poke s
-- | The empty poke. Provided here as we can't provide it via empty.
emptyPoke :: Poke s
-- | Sequence two Pokes. We only require the length of the left
-- poke.
sequencePokes :: Poke s -> Int -> Poke s -> Poke s
-- | essentially memset
replicateByte :: Int -> Word8 -> Poke RealWorld
-- | Efficient type-level bytestring serialization.
--
-- [Natural]s have a convenient syntax, and we can use
-- them as a type-level bytestring by asserting that each Natural
-- is <=255 when reifying. This module provides type classes which
-- give you a serializer for a given [Natural].
--
-- We maximize efficiency by grouping bytes into machine words. We have
-- to be pretty verbose to achieve this. Each type class attempts to
-- group bytes into its machine word type, and if it can't (i.e. not
-- enough bytes remain), it hands off to the next type class which
-- handles the next smaller machine word.
--
-- I did a quick Core check and found that GHC seems to successfully
-- generate minimal code for this e.g. for an 8-byte magic, GHC will do
-- one writeWord64OffAddr# of a constant. Great!
--
-- The only way I can think of to make this faster is to somehow obtain
-- an Addr# with a known length. With that, we could
-- memcpy. But that would be slower for small magics, and maybe
-- others. And I doubt we can conjure up an Addr# at compile
-- time. So I'm fairly confident that this is the best you're gonna get.
module Bytezap.Struct.TypeLits.Bytes
-- | Serialize a type-level bytestring, largest grouping Word64.
class ReifyBytesW64 (ns :: [Natural])
reifyBytesW64 :: ReifyBytesW64 ns => Poke s
-- | Serialize a type-level bytestring, largest grouping Word32.
class ReifyBytesW32 (ns :: [Natural])
reifyBytesW32 :: ReifyBytesW32 ns => Poke s
-- | Serialize a type-level bytestring, largest grouping Word16.
class ReifyBytesW16 (ns :: [Natural])
reifyBytesW16 :: ReifyBytesW16 ns => Poke s
-- | Serialize a type-level bytestring, byte-by-byte.
class ReifyBytesW8 (ns :: [Natural])
reifyBytesW8 :: ReifyBytesW8 ns => Poke s
instance Bytezap.Struct.TypeLits.Bytes.ReifyBytesW8 ns => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW16 ns
instance (Data.Type.Byte.ReifyW8 n1, Bytezap.Struct.TypeLits.Bytes.ReifyBytesW8 ns) => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW8 (n1 : ns)
instance Bytezap.Struct.TypeLits.Bytes.ReifyBytesW8 '[]
instance Bytezap.Struct.TypeLits.Bytes.ReifyBytesW16 ns => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW32 ns
instance (Data.Type.Byte.ReifyW8 n1, Data.Type.Byte.ReifyW8 n2, Bytezap.Struct.TypeLits.Bytes.ReifyBytesW16 ns) => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW16 (n1 : n2 : ns)
instance Bytezap.Struct.TypeLits.Bytes.ReifyBytesW32 ns => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW64 ns
instance (Data.Type.Byte.ReifyW8 n1, Data.Type.Byte.ReifyW8 n2, Data.Type.Byte.ReifyW8 n3, Data.Type.Byte.ReifyW8 n4, Bytezap.Struct.TypeLits.Bytes.ReifyBytesW32 ns) => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW32 (n1 : n2 : n3 : n4 : ns)
instance (Data.Type.Byte.ReifyW8 n1, Data.Type.Byte.ReifyW8 n2, Data.Type.Byte.ReifyW8 n3, Data.Type.Byte.ReifyW8 n4, Data.Type.Byte.ReifyW8 n5, Data.Type.Byte.ReifyW8 n6, Data.Type.Byte.ReifyW8 n7, Data.Type.Byte.ReifyW8 n8, Bytezap.Struct.TypeLits.Bytes.ReifyBytesW64 ns) => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW64 (n1 : n2 : n3 : n4 : n5 : n6 : n7 : n8 : ns)
module Bytezap.Poke
type Poke# s = Addr# -> Int# -> State# s -> (# State# s, Int# #)
-- | Poke newtype wrapper.
newtype Poke s
Poke :: Poke# s -> Poke s
[unPoke] :: Poke s -> Poke# s
-- | Execute a Poke at a fresh ByteString of the given
-- length.
unsafeRunPokeBS :: Int -> Poke RealWorld -> ByteString
-- | Execute a Poke at a fresh ByteString of the given
-- maximum length. Does not reallocate if final size is less than
-- estimated.
unsafeRunPokeBSUptoN :: Int -> Poke RealWorld -> ByteString
-- | Execute a Poke at a pointer. Returns the number of bytes
-- written.
--
-- The pointer must be a mutable buffer with enough space to hold the
-- poke. Absolutely none of this is checked. Use with caution. Sensible
-- uses:
--
--
-- - implementing pokes to ByteStrings and the like
-- - executing known-length (!!) pokes to known-length (!!) buffers
-- e.g. together with allocaBytes
--
unsafeRunPoke :: MonadPrim s m => Poke s -> Ptr Word8 -> m Int
-- | Poke a type via its Prim' instance.
prim :: forall a s. Prim' a => a -> Poke s
byteString :: ByteString -> Poke RealWorld
byteArray# :: ByteArray# -> Int# -> Int# -> Poke s
-- | essentially memset
replicateByte :: Int -> Word8 -> Poke RealWorld
-- | Use a struct poke as a regular poke.
--
-- To do this, we must associate a constant byte length with an existing
-- poker. Note that pokers don't expose the type of the data they are
-- serializing, so this is a very clumsy operation by itself. You should
-- only be using this when you have such types in scope, and the constant
-- length should be obtained in a sensible manner (e.g.
-- KnownSizeOf for generic struct pokers, or your own constant
-- size class if you're doing funky stuff).
fromStructPoke :: Int -> Poke s -> Poke s
-- | Use a regular poke as a struct poke by throwing away the return
-- offset.
toStructPoke :: Poke s -> Poke s
instance GHC.Base.Semigroup (Bytezap.Poke.Poke s)
instance GHC.Base.Monoid (Bytezap.Poke.Poke s)
module Bytezap.Write.Internal
-- | A Poke with the associated size it pokes.
data Write s
Write :: Int -> Poke s -> Write s
[size] :: Write s -> Int
[poke] :: Write s -> Poke s
instance GHC.Base.Semigroup (Bytezap.Write.Internal.Write s)
instance GHC.Base.Monoid (Bytezap.Write.Internal.Write s)
module Bytezap.Write
-- | A Poke with the associated size it pokes.
data Write s
runWriteBS :: Write RealWorld -> ByteString
runWriteBSUptoN :: Write RealWorld -> ByteString
prim :: forall a s. Prim' a => a -> Write s
byteString :: ByteString -> Write RealWorld
byteArray# :: ByteArray# -> Int# -> Int# -> Write s
-- | essentially memset
replicateByte :: Int -> Word8 -> Write RealWorld
module Bytezap.Poke.Derived.Endian
w16le :: Word16 -> Poke s
w16be :: Word16 -> Poke s
w32le :: Word32 -> Poke s
w32be :: Word32 -> Poke s
w64le :: Word64 -> Poke s
w64be :: Word64 -> Poke s
i16le :: Int16 -> Poke s
i16be :: Int16 -> Poke s
i32le :: Int32 -> Poke s
i32be :: Int32 -> Poke s
i64le :: Int64 -> Poke s
i64be :: Int64 -> Poke s
module Bytezap.Poke.Derived
-- | Poke a ShortByteString.
shortByteString :: ShortByteString -> Poke s
-- | Poke a Text.
text :: Text -> Poke s
-- | Poke a Char.
--
-- Adapted from utf8-string.
char :: Char -> Poke s
-- | unsafePokeIndexed pokeAt off n performs n indexed
-- pokes starting from off.
--
-- Does not check bounds. Largely intended for bytewise pokes where some
-- work needs to be performed for each byte (e.g. escaping text and
-- poking inline).
unsafePokeIndexed :: (Int -> Poke s) -> Int -> Int -> Poke s
module Bytezap.Write.Derived
-- | Write a ShortByteString.
shortByteString :: ShortByteString -> Write s
-- | Write a Text.
text :: Text -> Write s
-- | Write a Char.
--
-- Adapted from utf8-string.
char :: Char -> Write s
module Bytezap.Poke.Json
escapedLength8 :: Text -> Int
escapeW8 :: Word8 -> Int
pokeEscapedTextUnquoted :: Text -> Poke s
pokeEscapeW8 :: Word8 -> Poke s
w8AsciiHex :: Word8 -> Poke s
c_lower_hex_table :: Ptr CChar
-- | Struct parser.
--
-- We do still have to do failure checking, because unlike C we check
-- some types (e.g. bitfields). Hopefully inlining can remove those
-- checks when unnecessary.
module Bytezap.Parser.Struct
type PureMode = Proxy# Void
type IOMode = State# RealWorld
type STMode s = State# s
type ParserT# (st :: ZeroBitType) e a = ForeignPtrContents {-^ pointer provenance -} -> Addr# {-^ base address -} -> Int# {-^ cursor offset from base -} -> st {-^ state token -} -> Res# st e a
newtype ParserT (st :: ZeroBitType) e a
ParserT :: ParserT# st e a -> ParserT (st :: ZeroBitType) e a
[runParserT#] :: ParserT (st :: ZeroBitType) e a -> ParserT# st e a
-- | The type of pure parsers.
type Parser = ParserT PureMode
-- | The type of parsers which can embed IO actions.
type ParserIO = ParserT IOMode
-- | The type of parsers which can embed ST actions.
type ParserST s = ParserT (STMode s)
-- | Primitive parser result wrapped with a state token.
--
-- You should rarely need to manipulate values of this type directly. Use
-- the provided bidirectional pattern synonyms OK#, Fail#
-- and Err#.
type Res# (st :: ZeroBitType) e a = (# st, ResI# e a #)
-- | Primitive parser result.
type ResI# e a = (# (# a #) | (# #) | (# e #) #)
-- | Res# constructor for a successful parse. Contains the return
-- value and a state token.
pattern OK# :: (st :: ZeroBitType) -> a -> Res# st e a
-- | Res# constructor for recoverable failure. Contains only a state
-- token.
pattern Fail# :: (st :: ZeroBitType) -> Res# st e a
-- | Res# constructor for errors which are by default
-- non-recoverable. Contains the error, plus a state token.
pattern Err# :: (st :: ZeroBitType) -> e -> Res# st e a
-- | caller must guarantee that buffer is long enough for parser!!
unsafeRunParserBs :: forall a e. ByteString -> Parser e a -> Result e a
-- | caller must guarantee that buffer is long enough for parser!!
unsafeRunParserPtr :: forall a e. Ptr Word8 -> Parser e a -> Result e a
-- | caller must guarantee that buffer is long enough for parser!!
unsafeRunParserFPtr :: forall a e. ForeignPtr Word8 -> Parser e a -> Result e a
-- | caller must guarantee that buffer is long enough for parser!!
unsafeRunParser' :: forall a e. Addr# -> ForeignPtrContents -> Parser e a -> Result e a
-- | Higher-level boxed data type for parsing results.
data Result e a
-- | Contains return value.
OK :: a -> Result e a
-- | Recoverable-by-default failure.
Fail :: Result e a
-- | Unrecoverable-by-default error.
Err :: !e -> Result e a
-- | can't provide via pure as no Applicative
constParse :: a -> ParserT st e a
sequenceParsers :: Int -> (a -> b -> c) -> ParserT st e a -> ParserT st e b -> ParserT st e c
prim :: forall a st e. Prim' a => ParserT st e a
-- | parse literal
lit :: Eq a => a -> ParserT st e a -> ParserT st e ()
-- | parse literal (CPS)
withLit :: Eq a => Int# -> a -> ParserT st e a -> ParserT st e r -> ParserT st e r
instance (GHC.Show.Show a, GHC.Show.Show e) => GHC.Show.Show (Bytezap.Parser.Struct.Result e a)
instance GHC.Base.Functor (Bytezap.Parser.Struct.ParserT st e)
-- | Efficient type-level bytestring parsing.
--
-- One may implement this using the type-level serializing, but mirroring
-- it for parsing does less work and allocation.
module Bytezap.Parser.Struct.TypeLits.Bytes
-- | Parse a type-level bytestring, largest grouping Word64.
class ParseReifyBytesW64 (ns :: [Natural])
parseReifyBytesW64 :: ParseReifyBytesW64 ns => ParserT st Void ()
-- | Parse a type-level bytestring, largest grouping Word32.
class ParseReifyBytesW32 (ns :: [Natural])
parseReifyBytesW32 :: ParseReifyBytesW32 ns => ParserT st e ()
-- | Parse a type-level bytestring, largest grouping Word16.
class ParseReifyBytesW16 (ns :: [Natural])
parseReifyBytesW16 :: ParseReifyBytesW16 ns => ParserT st e ()
-- | Parse a type-level bytestring, byte-by-byte.
class ParseReifyBytesW8 (ns :: [Natural])
parseReifyBytesW8 :: ParseReifyBytesW8 ns => ParserT st e ()
instance Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW8 ns => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW16 ns
instance (Data.Type.Byte.ReifyW8 n1, Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW8 ns) => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW8 (n1 : ns)
instance Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW8 '[]
instance Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW16 ns => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW32 ns
instance (Data.Type.Byte.ReifyW8 n1, Data.Type.Byte.ReifyW8 n2, Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW16 ns) => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW16 (n1 : n2 : ns)
instance Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW32 ns => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW64 ns
instance (Data.Type.Byte.ReifyW8 n1, Data.Type.Byte.ReifyW8 n2, Data.Type.Byte.ReifyW8 n3, Data.Type.Byte.ReifyW8 n4, Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW32 ns) => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW32 (n1 : n2 : n3 : n4 : ns)
instance (Data.Type.Byte.ReifyW8 n1, Data.Type.Byte.ReifyW8 n2, Data.Type.Byte.ReifyW8 n3, Data.Type.Byte.ReifyW8 n4, Data.Type.Byte.ReifyW8 n5, Data.Type.Byte.ReifyW8 n6, Data.Type.Byte.ReifyW8 n7, Data.Type.Byte.ReifyW8 n8, Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW64 ns) => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW64 (n1 : n2 : n3 : n4 : n5 : n6 : n7 : n8 : ns)
-- | Handy typenat utils.
module Util.TypeNats
natVal'' :: forall n. KnownNat n => Natural
natValInt :: forall n. KnownNat n => Int
-- | Generics for bytezap's struct serializer.
--
-- We can't use my generic-data-functions library, because we're doing
-- more than just basic monoidal composition. But I still want the same
-- pluggable generics, where the user provides the class to use for base
-- cases. So I do that. However, unlike g-d-f, the class info can't be
-- provided via the user-selected monoid, because you don't select that.
-- Instead, we take a simple "index" type. It's pretty much the same
-- idea, surprisingly. This way, we can provide a few sensible "versions"
-- like in g-d-f, while primarily designing for DIY.
module Bytezap.Struct.Generic
-- | Class for holding info on class to use for poking base cases.
--
-- The type is just used to map to class info. It is never instantiated.
-- By packing KnownSizeOf into here, we don't need to enforce a
-- type-level solution! Now it's up to you how you want to track your
-- constant lengths.
--
-- We stay unboxed here because the internals are unboxed, just for
-- convenience. Maybe this is bad, let me know.
class GPokeBase tag where {
-- | The state token of our poker.
type GPokeBaseSt tag;
-- | The type class that provides base case poking.
--
-- The type class should provide a function that looks like
-- gPokeBase.
type GPokeBaseC tag a :: Constraint;
type GPokeBaseLenTF tag :: Type ~> Natural;
}
gPokeBase :: (GPokeBase tag, GPokeBaseC tag a) => a -> Poke# (GPokeBaseSt tag)
class GPoke tag f
gPoke :: GPoke tag f => f p -> Poke# (GPokeBaseSt tag)
instance forall k1 k2 (tag :: k1) (f :: k2 -> GHC.Types.Type) (c :: GHC.Generics.Meta). Bytezap.Struct.Generic.GPoke tag f => Bytezap.Struct.Generic.GPoke tag (GHC.Generics.D1 c f)
instance forall k1 k2 (tag :: k1) (f :: k2 -> GHC.Types.Type) (c :: GHC.Generics.Meta). Bytezap.Struct.Generic.GPoke tag f => Bytezap.Struct.Generic.GPoke tag (GHC.Generics.C1 c f)
instance forall k1 k2 (tag :: k1) (l :: k2 -> GHC.Types.Type) (r :: k2 -> GHC.Types.Type) (lenL :: GHC.Num.Natural.Natural). (Bytezap.Struct.Generic.GPoke tag l, Bytezap.Struct.Generic.GPoke tag r, Bytezap.Struct.Generic.GPokeBase tag, lenL GHC.Types.~ Bytezap.Common.Generic.GTFoldMapCAddition (Bytezap.Struct.Generic.GPokeBaseLenTF tag) l, GHC.TypeNats.KnownNat lenL) => Bytezap.Struct.Generic.GPoke tag (l GHC.Generics.:*: r)
instance forall k1 k2 (tag :: k1) a (c :: GHC.Generics.Meta). (Bytezap.Struct.Generic.GPokeBase tag, Bytezap.Struct.Generic.GPokeBaseC tag a) => Bytezap.Struct.Generic.GPoke tag (GHC.Generics.S1 c (GHC.Generics.Rec0 a))
instance forall k1 k2 (tag :: k1). Bytezap.Struct.Generic.GPoke tag GHC.Generics.U1
-- | Pokes with type-level poke length.
module Bytezap.Poke.KnownLen
newtype PokeKnownLen (len :: Natural) s
PokeKnownLen :: Poke s -> PokeKnownLen (len :: Natural) s
[unPokeKnownLen] :: PokeKnownLen (len :: Natural) s -> Poke s
mappend' :: PokeKnownLen n s -> PokeKnownLen m s -> PokeKnownLen (n + m) s
mempty' :: PokeKnownLen 0 s
runPokeKnownLenBS :: forall n. KnownNat n => PokeKnownLen n RealWorld -> ByteString
prim :: Prim' a => a -> PokeKnownLen (SizeOf a) s
module Bytezap.Parser.Struct.Generic
class GParseBase tag where {
-- | The state token of the parser.
type GParseBaseSt tag :: ZeroBitType;
type GParseBaseC tag a :: Constraint;
type GParseBaseE tag :: Type;
-- | Defunctionalization symbol for a type family turning Types into
-- Naturals. (Needed as we can't partially apply type families.)
type GParseBaseLenTF tag :: Type ~> Natural;
}
gParseBase :: (GParseBase tag, GParseBaseC tag a) => ParserT (GParseBaseSt tag) (GParseBaseE tag) a
class GParse tag gf
gParse :: GParse tag gf => ParserT (GParseBaseSt tag) (GParseBaseE tag) (gf p)
instance forall k1 k2 (tag :: k1) (gf :: k2 -> GHC.Types.Type) (cd :: GHC.Generics.Meta). Bytezap.Parser.Struct.Generic.GParse tag gf => Bytezap.Parser.Struct.Generic.GParse tag (GHC.Generics.D1 cd gf)
instance forall k1 k2 (tag :: k1) (gf :: k2 -> GHC.Types.Type) (cc :: GHC.Generics.Meta). Bytezap.Parser.Struct.Generic.GParse tag gf => Bytezap.Parser.Struct.Generic.GParse tag (GHC.Generics.C1 cc gf)
instance forall k1 k2 (tag :: k1) (l :: k2 -> GHC.Types.Type) (r :: k2 -> GHC.Types.Type) (lenL :: GHC.Num.Natural.Natural). (Bytezap.Parser.Struct.Generic.GParse tag l, Bytezap.Parser.Struct.Generic.GParse tag r, Bytezap.Parser.Struct.Generic.GParseBase tag, lenL GHC.Types.~ Bytezap.Common.Generic.GTFoldMapCAddition (Bytezap.Parser.Struct.Generic.GParseBaseLenTF tag) l, GHC.TypeNats.KnownNat lenL) => Bytezap.Parser.Struct.Generic.GParse tag (l GHC.Generics.:*: r)
instance forall k1 k2 (tag :: k1) a (c :: GHC.Generics.Meta). (Bytezap.Parser.Struct.Generic.GParseBase tag, Bytezap.Parser.Struct.Generic.GParseBaseC tag a) => Bytezap.Parser.Struct.Generic.GParse tag (GHC.Generics.S1 c (GHC.Generics.Rec0 a))
instance forall k1 k2 (tag :: k1). Bytezap.Parser.Struct.Generic.GParse tag GHC.Generics.U1