-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/
-- | Bytestring builder with zero intermediate allocation
--
-- Please see README.md.
@package bytezap
@version 1.4.0
module Bytezap
module Bytezap.Common.Generic
data PlusSym f
data PlusSym1 l r
-- | Generic type foldMap using the addition monoid.
type GTFoldMapCAddition f gf = GTFoldMapC PlusSym 0 f gf
-- | Missing byteSwap functions for signed integers.
--
-- base provides them for unsigned integers, but not for signed ones.
-- They should probably be provided upstream, so consider this a
-- compatibility module for a future base that has them.
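--
-- A quick illustration (values chosen here, not from the package docs):
--
-- > byteSwapI16 0x0102 == 0x0201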
module Raehik.Compat.Data.Int.ByteSwap
byteSwapI16 :: Int16 -> Int16
byteSwapI32 :: Int32 -> Int32
byteSwapI64 :: Int64 -> Int64
byteSwapI :: Int -> Int
-- | Missing byteSwap function for the native-size Word.
--
-- base provides byteSwap16, byteSwap32 and byteSwap64 for the
-- fixed-width unsigned integers, but nothing for Word itself.
module Raehik.Compat.Data.Word.ByteSwap
byteSwap :: Word -> Word
module Raehik.Compat.GHC.Exts.GHC908MemcpyPrimops
copyAddrToAddrNonOverlapping# :: Addr# -> Addr# -> Int# -> State# RealWorld -> State# RealWorld
setAddrRange# :: Addr# -> Int# -> Int# -> State# RealWorld -> State# RealWorld
module Raehik.Compat.GHC.Exts.GHC910UnalignedAddrPrimops
indexWord8OffAddrAsWord16# :: Addr# -> Int# -> Word16#
indexWord8OffAddrAsWord32# :: Addr# -> Int# -> Word32#
indexWord8OffAddrAsWord64# :: Addr# -> Int# -> Word64#
indexWord8OffAddrAsWord# :: Addr# -> Int# -> Word#
indexWord8OffAddrAsInt16# :: Addr# -> Int# -> Int16#
indexWord8OffAddrAsInt32# :: Addr# -> Int# -> Int32#
indexWord8OffAddrAsInt64# :: Addr# -> Int# -> Int64#
indexWord8OffAddrAsInt# :: Addr# -> Int# -> Int#
readWord8OffAddrAsWord16# :: Addr# -> Int# -> State# d -> (# State# d, Word16# #)
readWord8OffAddrAsWord32# :: Addr# -> Int# -> State# d -> (# State# d, Word32# #)
readWord8OffAddrAsWord64# :: Addr# -> Int# -> State# d -> (# State# d, Word64# #)
readWord8OffAddrAsWord# :: Addr# -> Int# -> State# d -> (# State# d, Word# #)
readWord8OffAddrAsInt16# :: Addr# -> Int# -> State# d -> (# State# d, Int16# #)
readWord8OffAddrAsInt32# :: Addr# -> Int# -> State# d -> (# State# d, Int32# #)
readWord8OffAddrAsInt64# :: Addr# -> Int# -> State# d -> (# State# d, Int64# #)
readWord8OffAddrAsInt# :: Addr# -> Int# -> State# d -> (# State# d, Int# #)
writeWord8OffAddrAsWord16# :: Addr# -> Int# -> Word16# -> State# d -> State# d
writeWord8OffAddrAsWord32# :: Addr# -> Int# -> Word32# -> State# d -> State# d
writeWord8OffAddrAsWord64# :: Addr# -> Int# -> Word64# -> State# d -> State# d
writeWord8OffAddrAsWord# :: Addr# -> Int# -> Word# -> State# d -> State# d
writeWord8OffAddrAsInt16# :: Addr# -> Int# -> Int16# -> State# d -> State# d
writeWord8OffAddrAsInt32# :: Addr# -> Int# -> Int32# -> State# d -> State# d
writeWord8OffAddrAsInt64# :: Addr# -> Int# -> Int64# -> State# d -> State# d
writeWord8OffAddrAsInt# :: Addr# -> Int# -> Int# -> State# d -> State# d
module Raehik.Compat.Data.Primitive.Types
-- | Prim extension class providing unaligned accesses.
--
-- The hope is to get this merged upstream; see
-- https://github.com/haskell/primitive/issues/409.
-- (This class also includes Addr# primops which that issue/PR may not.)
--
-- It also includes an associated type for size in bytes, another thing
-- primitive could plausibly provide. (It wouldn't be hard!)
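--
-- A small sketch of consuming the associated size (the helper name is
-- invented for illustration; assumes Util.TypeNats.natValInt from this
-- package plus ScopedTypeVariables and TypeApplications):
--
-- > sizeOfVal :: forall a. (Prim' a, KnownNat (SizeOf a)) => a -> Int
-- > sizeOfVal _ = natValInt @(SizeOf a)
-- >
-- > -- e.g. sizeOfVal (0 :: Word32) == 4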
class Prim a => Prim' a where {
type SizeOf a :: Natural;
}
-- | Read a value from the array. The offset is in bytes.
indexWord8ByteArrayAs# :: Prim' a => ByteArray# -> Int# -> a
readWord8ByteArrayAs# :: Prim' a => MutableByteArray# s -> Int# -> State# s -> (# State# s, a #)
writeWord8ByteArrayAs# :: Prim' a => MutableByteArray# s -> Int# -> a -> State# s -> State# s
indexWord8OffAddrAs# :: Prim' a => Addr# -> Int# -> a
readWord8OffAddrAs# :: Prim' a => Addr# -> Int# -> State# s -> (# State# s, a #)
writeWord8OffAddrAs# :: Prim' a => Addr# -> Int# -> a -> State# s -> State# s
-- | Class of types supporting primitive array operations. This includes
-- interfacing with GC-managed memory (functions suffixed with
-- ByteArray#) and interfacing with unmanaged memory (functions
-- suffixed with Addr#). Endianness is platform-dependent.
class () => Prim a
-- | Size of values of type a. The argument is not used.
sizeOf# :: Prim a => a -> Int#
-- | Alignment of values of type a. The argument is not used.
alignment# :: Prim a => a -> Int#
-- | Read a value from the array. The offset is in elements of type
-- a rather than in bytes.
indexByteArray# :: Prim a => ByteArray# -> Int# -> a
-- | Read a value from the mutable array. The offset is in elements of type
-- a rather than in bytes.
readByteArray# :: Prim a => MutableByteArray# s -> Int# -> State# s -> (# State# s, a #)
-- | Write a value to the mutable array. The offset is in elements of type
-- a rather than in bytes.
writeByteArray# :: Prim a => MutableByteArray# s -> Int# -> a -> State# s -> State# s
-- | Fill a slice of the mutable array with a value. The offset and length
-- of the chunk are in elements of type a rather than in bytes.
setByteArray# :: Prim a => MutableByteArray# s -> Int# -> Int# -> a -> State# s -> State# s
-- | Read a value from a memory position given by an address and an offset.
-- The memory block the address refers to must be immutable. The offset
-- is in elements of type a rather than in bytes.
indexOffAddr# :: Prim a => Addr# -> Int# -> a
-- | Read a value from a memory position given by an address and an offset.
-- The offset is in elements of type a rather than in bytes.
readOffAddr# :: Prim a => Addr# -> Int# -> State# s -> (# State# s, a #)
-- | Write a value to a memory position given by an address and an offset.
-- The offset is in elements of type a rather than in bytes.
writeOffAddr# :: Prim a => Addr# -> Int# -> a -> State# s -> State# s
-- | Fill a memory block given by an address, an offset and a length. The
-- offset and length are in elements of type a rather than in
-- bytes.
setOffAddr# :: Prim a => Addr# -> Int# -> Int# -> a -> State# s -> State# s
-- | Size of values of type a. The argument is not used.
--
-- This function has existed since 0.1, but was moved from
-- Primitive to Types in version 0.6.3.0.
sizeOf :: Prim a => a -> Int
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Word.Word8
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Word.Word16
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Word.Word32
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Word.Word64
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Int.Int8
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Int.Int16
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Int.Int32
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Int.Int64
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Types.Word
instance Raehik.Compat.Data.Primitive.Types.Prim' GHC.Types.Int
-- | Byte order (endianness) newtypes and byte-swapping support. I think
-- this should be in primitive.
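--
-- A usage sketch (assuming the Bytezap.Poke serializer from this package
-- and DataKinds for the 'BigEndian promotion):
--
-- > import GHC.ByteOrder (ByteOrder (..))
-- > import Bytezap.Poke (unsafeRunPokeBS, prim)
-- >
-- > -- serializes big-endian regardless of host byte order
-- > bs :: ByteString
-- > bs = unsafeRunPokeBS 4 (prim (ByteOrdered 0xDEADBEEF :: ByteOrdered 'BigEndian Word32))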
module Raehik.Compat.Data.Primitive.Types.Endian
-- | Boxed types which permit reversing byte order ("byte swapping").
class ByteSwap a
byteSwap :: ByteSwap a => a -> a
newtype ByteOrdered (end :: ByteOrder) a
ByteOrdered :: a -> ByteOrdered (end :: ByteOrder) a
[unByteOrdered] :: ByteOrdered (end :: ByteOrder) a -> a
-- | Newtype for easier instance derivation.
newtype PrimByteSwapped a
PrimByteSwapped :: a -> PrimByteSwapped a
[unPrimByteSwapped] :: PrimByteSwapped a -> a
instance GHC.Real.Integral a => GHC.Real.Integral (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance GHC.Enum.Enum a => GHC.Enum.Enum (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance GHC.Real.Real a => GHC.Real.Real (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance GHC.Num.Num a => GHC.Num.Num (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance GHC.Show.Show a => GHC.Show.Show (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance GHC.Classes.Eq a => GHC.Classes.Eq (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance GHC.Classes.Ord a => GHC.Classes.Ord (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered end a)
instance Data.Primitive.Types.Prim a => Data.Primitive.Types.Prim (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered 'GHC.ByteOrder.LittleEndian a)
instance Raehik.Compat.Data.Primitive.Types.Prim' a => Raehik.Compat.Data.Primitive.Types.Prim' (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered 'GHC.ByteOrder.LittleEndian a)
instance (Data.Primitive.Types.Prim a, Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap a) => Data.Primitive.Types.Prim (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered 'GHC.ByteOrder.BigEndian a)
instance (Raehik.Compat.Data.Primitive.Types.Prim' a, Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap a) => Raehik.Compat.Data.Primitive.Types.Prim' (Raehik.Compat.Data.Primitive.Types.Endian.ByteOrdered 'GHC.ByteOrder.BigEndian a)
instance (Data.Primitive.Types.Prim a, Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap a) => Data.Primitive.Types.Prim (Raehik.Compat.Data.Primitive.Types.Endian.PrimByteSwapped a)
instance (Raehik.Compat.Data.Primitive.Types.Prim' a, Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap a) => Raehik.Compat.Data.Primitive.Types.Prim' (Raehik.Compat.Data.Primitive.Types.Endian.PrimByteSwapped a)
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Word.Word16
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Word.Word32
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Word.Word64
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Types.Word
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Int.Int16
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Int.Int32
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Int.Int64
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Types.Int
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Types.Float
instance Raehik.Compat.Data.Primitive.Types.Endian.ByteSwap GHC.Types.Double
-- | Struct serializer: serialize fields of known length.
--
-- In Haskell-ish terminology, one may consider a C struct to be a
-- product type where each field has a known length. Fields may thus be
-- accessed at a fixed offset from the start of the struct, which is
-- convenient for efficient access, since those offsets can become
-- register-relative immediates in a MOV instruction.
--
-- Given a struct-like type, we don't need to track "bytes serialized so
-- far" as in the general case. We can serialize fields in any order we
-- like, since we know where they will sit in the resulting bytestring.
--
-- This module provides a serializer specifically for such struct-like
-- types, in the hope that GHC can generate more efficient code for these
-- super-simple cases; performance comparisons are pending.
--
-- Notably, this serializer is much less flexible: there is no Monoid
-- instance. You are not really expected to write pokes manually with it;
-- use the generics instead. (TODO: Template Haskell derivation could
-- also be provided and compared.)
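--
-- A hand-written usage sketch (you would normally reach for the
-- generics; the lengths used are the usual 4-byte Word32 and 1-byte
-- Word8 sizes):
--
-- > -- a Word32 field followed by a Word8 field: 5 bytes total
-- > pair :: Word32 -> Word8 -> Poke s
-- > pair a b = sequencePokes (prim a) 4 (prim b)
-- >
-- > runPair :: Word32 -> Word8 -> ByteString
-- > runPair a b = unsafeRunPokeBS 5 (pair a b)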
module Bytezap.Struct
-- | A struct poker: base address (constant), byte offset, state token.
--
-- We could combine base address and byte offset, but we're aiming for
-- code that stores the address in a register and uses immediates to
-- access fields (like a good C compiler will do for its structs). So by
-- keeping them separate, I'm hoping that we can nudge GHC towards such
-- behaviour.
type Poke# s = Addr# -> Int# -> State# s -> State# s
-- | Poke newtype wrapper.
newtype Poke s
Poke :: Poke# s -> Poke s
[unPoke] :: Poke s -> Poke# s
-- | Execute a Poke at a fresh ByteString of the given
-- length.
unsafeRunPokeBS :: Int -> Poke RealWorld -> ByteString
-- | Execute a Poke at a pointer. Returns the number of bytes
-- written.
--
-- The pointer must be a mutable buffer with enough space to hold the
-- poke. Absolutely none of this is checked. Use with caution. Sensible
-- uses:
--
--
-- - implementing pokes to ByteStrings and the like
-- - executing known-length (!!) pokes to known-length (!!) buffers
-- e.g. together with allocaBytes
--
unsafeRunPoke :: MonadPrim s m => Poke s -> Ptr Word8 -> m ()
-- | Poke a type via its Prim' instance.
prim :: forall a s. Prim' a => a -> Poke s
-- | The empty poke. Provided here explicitly since the struct Poke has
-- no Monoid instance to supply it via mempty.
emptyPoke :: Poke s
-- | Sequence two Pokes. We only require the length of the left
-- poke.
sequencePokes :: Poke s -> Int -> Poke s -> Poke s
-- | Essentially memset.
replicateByte :: Int -> Word8 -> Poke RealWorld
-- | Efficient type-level bytestring serialization via chunking.
--
-- I did a quick Core check and found that GHC seems to successfully
-- generate minimal code for this e.g. for an 8-byte magic, GHC will do
-- one writeWord64OffAddr# of a constant. Great!
--
-- The only way I can think of to make this faster is to somehow obtain
-- an Addr# with a known length. With that, we could
-- memcpy. But that would be slower for small magics, and maybe
-- others. And I doubt we can conjure up an Addr# at compile
-- time. So I'm fairly confident that this is the best you're gonna get.
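--
-- A usage sketch (assuming DataKinds and TypeApplications; the bytes are
-- chosen here for illustration):
--
-- > -- poke the 4-byte magic "HI!\n"
-- > magic :: Poke s
-- > magic = reifyBytesW64 @'[0x48, 0x49, 0x21, 0x0a]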
module Bytezap.Struct.TypeLits.Bytes
-- | Serialize a type-level bytestring, largest grouping Word64.
class ReifyBytesW64 (bs :: [Natural])
reifyBytesW64 :: ReifyBytesW64 bs => Poke s
-- | Serialize a type-level bytestring, largest grouping Word32.
class ReifyBytesW32 (bs :: [Natural])
reifyBytesW32 :: ReifyBytesW32 bs => Poke s
-- | Serialize a type-level bytestring, largest grouping Word16.
class ReifyBytesW16 (bs :: [Natural])
reifyBytesW16 :: ReifyBytesW16 bs => Poke s
-- | Serialize a type-level bytestring, byte-by-byte.
class ReifyBytesW8 (bs :: [Natural])
reifyBytesW8 :: ReifyBytesW8 bs => Poke s
instance Bytezap.Struct.TypeLits.Bytes.ReifyBytesW8 bs => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW16 bs
instance (Data.Type.Byte.ReifyW8 b0, Bytezap.Struct.TypeLits.Bytes.ReifyBytesW8 bs) => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW8 (b0 : bs)
instance Bytezap.Struct.TypeLits.Bytes.ReifyBytesW8 '[]
instance Bytezap.Struct.TypeLits.Bytes.ReifyBytesW16 bs => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW32 bs
instance (Data.Type.Byte.ReifyW8 b0, Data.Type.Byte.ReifyW8 b1, Bytezap.Struct.TypeLits.Bytes.ReifyBytesW16 bs) => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW16 (b0 : b1 : bs)
instance Bytezap.Struct.TypeLits.Bytes.ReifyBytesW32 bs => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW64 bs
instance (Data.Type.Byte.ReifyW8 b0, Data.Type.Byte.ReifyW8 b1, Data.Type.Byte.ReifyW8 b2, Data.Type.Byte.ReifyW8 b3, Bytezap.Struct.TypeLits.Bytes.ReifyBytesW32 bs) => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW32 (b0 : b1 : b2 : b3 : bs)
instance (Data.Type.Byte.ReifyW8 b0, Data.Type.Byte.ReifyW8 b1, Data.Type.Byte.ReifyW8 b2, Data.Type.Byte.ReifyW8 b3, Data.Type.Byte.ReifyW8 b4, Data.Type.Byte.ReifyW8 b5, Data.Type.Byte.ReifyW8 b6, Data.Type.Byte.ReifyW8 b7, Bytezap.Struct.TypeLits.Bytes.ReifyBytesW64 bs) => Bytezap.Struct.TypeLits.Bytes.ReifyBytesW64 (b0 : b1 : b2 : b3 : b4 : b5 : b6 : b7 : bs)
module Bytezap.Poke
-- | Unboxed buffer write operation.
--
-- The next offset must be greater than or equal to the input buffer
-- offset. This is not checked.
--
-- Note that the only way to find out the length of a write is to perform
-- it. But you can't perform a write without providing a correctly-sized
-- buffer. Thus, you may only use a Poke# when you have a buffer
-- large enough to fit its maximum write length, which in turn means
-- you must track write lengths separately. (Write does
-- this.)
--
-- I provide this highly unsafe, seemingly unhelpful type because it's a
-- requirement for Write, and at this level performance is easier
-- to guarantee because laziness is not a concern.
--
-- We cannot be polymorphic on the pointer type unless we box the
-- pointer. We thus limit ourselves to writing to Addr#s, and not
-- MutableByteArray#s. (I figure we're most interested in
-- ByteStrings, which use Addr#.)
--
-- Note that if we did return the write length, the next offset might
-- appear superfluous. But that next offset is usually already
-- calculated, and may be passed directly to sequenced writes, unlike if
-- we returned a write length which would need to be added to the
-- original offset.
type Poke# s = Addr# {-^ buffer pointer -} -> Int# {-^ buffer offset -} -> State# s {-^ state token -} -> (# State# s, Int# #) {-^ (state token, next offset) -}
-- | Poke newtype wrapper.
newtype Poke s
Poke :: Poke# s -> Poke s
[unPoke] :: Poke s -> Poke# s
-- | Execute a Poke at a fresh ByteString of the given
-- length.
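--
-- A minimal composition sketch (the caller tracks the total length by
-- hand; sizes are the usual 4-byte Word32 and 1-byte Word8):
--
-- > -- 6 bytes: a host-endian Word32 followed by two Word8s
-- > bs :: ByteString
-- > bs = unsafeRunPokeBS 6 $
-- >     prim (0xDEADBEEF :: Word32) <> prim (0x01 :: Word8) <> prim (0x02 :: Word8)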
unsafeRunPokeBS :: Int -> Poke RealWorld -> ByteString
-- | Execute a Poke at a fresh ByteString of the given
-- maximum length. Does not reallocate if final size is less than
-- estimated.
unsafeRunPokeBSUptoN :: Int -> Poke RealWorld -> ByteString
-- | Execute a Poke at a pointer. Returns the number of bytes
-- written.
--
-- The pointer must be a mutable buffer with enough space to hold the
-- poke. Absolutely none of this is checked. Use with caution. Sensible
-- uses:
--
--
-- - implementing pokes to ByteStrings and the like
-- - executing known-length (!!) pokes to known-length (!!) buffers
-- e.g. together with allocaBytes
--
unsafeRunPoke :: MonadPrim s m => Poke s -> Ptr Word8 -> m Int
-- | Poke a type via its Prim' instance.
prim :: forall a s. Prim' a => a -> Poke s
byteString :: ByteString -> Poke RealWorld
byteArray# :: ByteArray# -> Int# -> Int# -> Poke s
-- | Essentially memset.
replicateByte :: Int -> Word8 -> Poke RealWorld
-- | Use a struct poke as a regular poke.
--
-- To do this, we must associate a constant byte length with an existing
-- poker. Note that pokers don't expose the type of the data they are
-- serializing, so this is a very clumsy operation by itself. You should
-- only be using this when you have such types in scope, and the constant
-- length should be obtained in a sensible manner (e.g.
-- KnownSizeOf for generic struct pokers, or your own constant
-- size class if you're doing funky stuff).
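--
-- A sketch (assuming the struct serializer's prim, imported qualified,
-- and the standard 4-byte size of Word32):
--
-- > import qualified Bytezap.Struct as Struct
-- >
-- > w32 :: Word32 -> Poke s
-- > w32 = fromStructPoke 4 . Struct.prim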
fromStructPoke :: Int -> Poke s -> Poke s
-- | Use a regular poke as a struct poke by throwing away the returned
-- offset.
toStructPoke :: Poke s -> Poke s
instance GHC.Base.Semigroup (Bytezap.Poke.Poke s)
instance GHC.Base.Monoid (Bytezap.Poke.Poke s)
module Bytezap.Write.Internal
-- | A Poke buffer write operation with the associated length to be
-- written.
--
-- The length may be either exact or a maximum.
--
-- TODO strictness?
data Write (lt :: LengthType) s
Write :: Int -> Poke s -> Write (lt :: LengthType) s
-- | Length of the write in bytes.
--
-- This is not statically asserted. Any time you construct a
-- Write, you must promise this.
--
-- For Write ExactLength s, this is an exact
-- measurement. For Write MaxLength s, this is a
-- maximum.
[writeLength] :: Write (lt :: LengthType) s -> Int
-- | The Poke buffer write operation.
[writeOp] :: Write (lt :: LengthType) s -> Poke s
-- | What a buffer write length field means.
data LengthType
-- | Exact length to be written.
ExactLength :: LengthType
-- | Maximum length to be written.
MaxLength :: LengthType
-- | Turn a Write ExactLength into a
-- Write MaxLength.
writeMax :: Write ExactLength s -> Write MaxLength s
-- | Sequence a Write MaxLength and a
-- Write ExactLength left-to-right.
writeMaxExact :: Write MaxLength s -> Write ExactLength s -> Write MaxLength s
-- | Sequence a Write ExactLength and a
-- Write MaxLength left-to-right.
writeExactMax :: Write ExactLength s -> Write MaxLength s -> Write MaxLength s
-- | Sequence two Writes left-to-right.
--
-- Unsafe, as it ignores LengthTypes.
--
-- TODO strictness? INLINE[1]? INLINE[0]?
writeCombine :: Write ltl s -> Write ltr s -> Write lt s
instance GHC.Base.Semigroup (Bytezap.Write.Internal.Write lt s)
instance GHC.Base.Monoid (Bytezap.Write.Internal.Write lt s)
module Bytezap.Write
-- | A Poke buffer write operation with the associated length to be
-- written.
--
-- The length may be either exact or a maximum.
--
-- TODO strictness?
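--
-- A usage sketch (exact-length writes composed monoidally, then run with
-- runWriteBS):
--
-- > bs :: ByteString
-- > bs = runWriteBS (prim (0x2a :: Word8) <> replicateByte 3 0x00)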
data Write (lt :: LengthType) s
-- | What a buffer write length field means.
data LengthType
-- | Exact length to be written.
ExactLength :: LengthType
-- | Maximum length to be written.
MaxLength :: LengthType
runWriteBS :: Write ExactLength RealWorld -> ByteString
runWriteBSUptoN :: Write MaxLength RealWorld -> ByteString
prim :: forall a s. Prim' a => a -> Write ExactLength s
byteString :: ByteString -> Write ExactLength RealWorld
byteArray# :: ByteArray# -> Int# -> Int# -> Write ExactLength s
-- | Essentially memset.
replicateByte :: Int -> Word8 -> Write ExactLength RealWorld
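-- | Endian-explicit pokers for fixed-width integers.
--
-- A usage sketch (assuming unsafeRunPokeBS from Bytezap.Poke):
--
-- > -- four bytes, big-endian regardless of host byte order
-- > bs :: ByteString
-- > bs = unsafeRunPokeBS 4 (w32be 0xDEADBEEF)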
module Bytezap.Poke.Derived.Endian
w16le :: Word16 -> Poke s
w16be :: Word16 -> Poke s
w32le :: Word32 -> Poke s
w32be :: Word32 -> Poke s
w64le :: Word64 -> Poke s
w64be :: Word64 -> Poke s
i16le :: Int16 -> Poke s
i16be :: Int16 -> Poke s
i32le :: Int32 -> Poke s
i32be :: Int32 -> Poke s
i64le :: Int64 -> Poke s
i64be :: Int64 -> Poke s
module Bytezap.Poke.Derived
-- | Poke a ShortByteString.
shortByteString :: ShortByteString -> Poke s
-- | Poke a Text.
text :: Text -> Poke s
-- | Poke a Char.
--
-- Adapted from utf8-string.
char :: Char -> Poke s
-- | unsafePokeIndexed pokeAt off n performs n indexed
-- pokes starting from off.
--
-- Does not check bounds. Largely intended for bytewise pokes where some
-- work needs to be performed for each byte (e.g. escaping text and
-- poking inline).
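--
-- An illustrative sketch, equivalent in effect to poking the whole
-- ByteString but showing the per-byte shape (prim is Bytezap.Poke.prim;
-- unsafeIndex is Data.ByteString.Unsafe.unsafeIndex):
--
-- > import qualified Data.ByteString as B
-- > import Data.ByteString.Unsafe (unsafeIndex)
-- >
-- > pokeEachByte :: B.ByteString -> Poke s
-- > pokeEachByte b = unsafePokeIndexed (\i -> prim (unsafeIndex b i)) 0 (B.length b)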
unsafePokeIndexed :: (Int -> Poke s) -> Int -> Int -> Poke s
module Bytezap.Write.Derived
-- | Write a ShortByteString.
shortByteString :: ShortByteString -> Write ExactLength s
-- | Write a Text.
text :: Text -> Write ExactLength s
-- | Write a Char.
--
-- Adapted from utf8-string.
char :: Char -> Write ExactLength s
module Bytezap.Poke.Json
escapedLength8 :: Text -> Int
escapeW8 :: Word8 -> Int
pokeEscapedTextUnquoted :: Text -> Poke s
pokeEscapeW8 :: Word8 -> Poke s
w8AsciiHex :: Word8 -> Poke s
c_lower_hex_table :: Ptr CChar
-- | Struct parser.
--
-- We still have to do failure checking because, unlike C, we validate
-- some types (e.g. bitfields). Hopefully inlining can remove those
-- checks when they are unnecessary.
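--
-- A usage sketch (assuming, as with sequencePokes, that the Int argument
-- of sequenceParsers is the byte length of the left parser):
--
-- > -- parse two Word32s laid out back to back (8 bytes)
-- > pairP :: Parser e (Word32, Word32)
-- > pairP = sequenceParsers 4 (,) prim prim
-- >
-- > -- the caller promises the ByteString is at least 8 bytes long
-- > runPair :: ByteString -> Result e (Word32, Word32)
-- > runPair bs = unsafeRunParserBs bs pairP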
module Bytezap.Parser.Struct
type PureMode = Proxy# Void
type IOMode = State# RealWorld
type STMode s = State# s
type ParserT# (st :: ZeroBitType) e a = ForeignPtrContents {-^ pointer provenance -} -> Addr# {-^ base address -} -> Int# {-^ cursor offset from base -} -> st {-^ state token -} -> Res# st e a
-- | Like flatparse, but with no buffer length (= no buffer overflow
-- checking) and no Addr# on success (= no dynamic-length parses).
--
-- We take a ForeignPtrContents because it lets us create
-- ByteStrings without copying if we want, which is useful.
newtype ParserT (st :: ZeroBitType) e a
ParserT :: ParserT# st e a -> ParserT (st :: ZeroBitType) e a
[runParserT#] :: ParserT (st :: ZeroBitType) e a -> ParserT# st e a
-- | The type of pure parsers.
type Parser = ParserT PureMode
-- | The type of parsers which can embed IO actions.
type ParserIO = ParserT IOMode
-- | The type of parsers which can embed ST actions.
type ParserST s = ParserT (STMode s)
-- | Primitive parser result wrapped with a state token.
--
-- You should rarely need to manipulate values of this type directly. Use
-- the provided bidirectional pattern synonyms OK#, Fail#
-- and Err#.
type Res# (st :: ZeroBitType) e a = (# st, ResI# e a #)
-- | Primitive parser result.
--
-- Like flatparse, but no Addr# on success.
type ResI# e a = (# (# a #) | (# #) | (# e #) #)
-- | Res# constructor for a successful parse. Contains the return
-- value and a state token.
pattern OK# :: (st :: ZeroBitType) -> a -> Res# st e a
-- | Res# constructor for recoverable failure. Contains only a state
-- token.
pattern Fail# :: (st :: ZeroBitType) -> Res# st e a
-- | Res# constructor for errors which are by default
-- non-recoverable. Contains the error, plus a state token.
pattern Err# :: (st :: ZeroBitType) -> e -> Res# st e a
-- | The caller must guarantee that the buffer is long enough for the parser!
unsafeRunParserBs :: forall a e. ByteString -> Parser e a -> Result e a
-- | The caller must guarantee that the buffer is long enough for the parser!
unsafeRunParserPtr :: forall a e. Ptr Word8 -> Parser e a -> Result e a
-- | The caller must guarantee that the buffer is long enough for the parser!
unsafeRunParserFPtr :: forall a e. ForeignPtr Word8 -> Parser e a -> Result e a
-- | The caller must guarantee that the buffer is long enough for the parser!
unsafeRunParser' :: forall a e. Addr# -> ForeignPtrContents -> Parser e a -> Result e a
-- | Higher-level boxed data type for parsing results.
data Result e a
-- | Contains return value.
OK :: a -> Result e a
-- | Recoverable-by-default failure.
Fail :: Result e a
-- | Unrecoverable-by-default error.
Err :: !e -> Result e a
-- | Can't be provided via pure, as there is no Applicative instance.
constParse :: a -> ParserT st e a
sequenceParsers :: Int -> (a -> b -> c) -> ParserT st e a -> ParserT st e b -> ParserT st e c
prim :: forall a st e. Prim' a => ParserT st e a
-- | Parse a literal.
lit :: Eq a => a -> ParserT st e a -> ParserT st e ()
-- | Parse a literal (CPS).
withLit :: Eq a => Int# -> a -> ParserT st e a -> ParserT st e r -> ParserT st e r
-- | Parse a literal, returning the first (leftmost) failing byte on error (CPS).
--
-- This can be used to parse large literals via chunking, rather than
-- byte-by-byte, while retaining useful error behaviour.
--
-- We don't check equality with XOR even though we use that when handling
-- errors, because it's hard to tell if it would be faster with modern
-- CPUs and compilers.
withLitErr :: (Num a, FiniteBits a) => (Int -> a -> e) -> Int# -> a -> (Addr# -> Int# -> a) -> ParserT st e r -> ParserT st e r
-- | Given two non-equal words wActual and wExpect,
-- return the index of the first non-matching byte. Zero indexed.
--
-- If both words are equal, returns word_size (e.g. 4 for
-- Word32).
firstNonMatchByteIdx :: FiniteBits a => a -> a -> Int
-- | Get the byte at the given index.
--
-- The return value is guaranteed to be 0x00 - 0xFF (inclusive).
--
-- TODO meaning based on endianness?
unsafeByteAt :: (Num a, Bits a) => a -> Int -> a
instance (GHC.Show.Show a, GHC.Show.Show e) => GHC.Show.Show (Bytezap.Parser.Struct.Result e a)
instance GHC.Base.Functor (Bytezap.Parser.Struct.ParserT st e)
-- | raehik's bytestring extras (reimplementations of unexported
-- internals).
module Tmp.BSExt
-- | Create a ByteString of size l and use action
-- f to fill its contents.
--
-- Reimplemented from the unexported function
-- Data.ByteString.Internal.Type.createFp.
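--
-- A tiny sketch (poking a single byte with Foreign.Storable):
--
-- > import Foreign.ForeignPtr (withForeignPtr)
-- > import Foreign.Storable (poke)
-- >
-- > one :: IO ByteString
-- > one = createFp 1 $ \fp -> withForeignPtr fp $ \p -> poke p (0x2a :: Word8)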
createFp :: Int -> (ForeignPtr Word8 -> IO ()) -> IO ByteString
-- | Copy the given number of bytes from the second area (source) into the
-- first (destination); the copied areas may not overlap.
--
-- Reimplemented from the unexported function memcpyFp.
memcpyFp :: ForeignPtr Word8 -> ForeignPtr Word8 -> Int -> IO ()
createUptoNCPS :: Int -> (Ptr Word8 -> (Int -> IO ByteString) -> IO r) -> IO r
createCPS :: Int -> (ForeignPtr Word8 -> Int -> IO r) -> ((Int -> IO r) -> Ptr Word8 -> IO r) -> IO r
createAndTrimCPS :: Int -> (Ptr Word8 -> (Int -> IO ByteString) -> IO r) -> IO r
unsafeCreateAndTrimCPS :: Int -> (Ptr Word8 -> (Int -> IO ByteString) -> IO r) -> r
unsafeDupablePerformIOByteString :: IO a -> a
createAndTrimFailable :: Int -> (Ptr Word8 -> IO (Either e Int)) -> IO (Either e ByteString)
createFpAndTrimFailable :: Int -> (ForeignPtr Word8 -> IO (Either e Int)) -> IO (Either e ByteString)
wrapAction :: (Ptr Word8 -> IO res) -> ForeignPtr Word8 -> IO res
createUptoNFailable :: Int -> (Ptr Word8 -> IO (Either e Int)) -> IO (Either e ByteString)
createFpUptoNFailable :: Int -> (ForeignPtr Word8 -> IO (Either e Int)) -> IO (Either e ByteString)
createFailable :: Int -> (Ptr Word8 -> IO (Either e Int)) -> IO (Either e ByteString)
createFpFailable :: Int -> (ForeignPtr Word8 -> IO (Either e Int)) -> IO (Either e ByteString)
-- | Variant of fromForeignPtr0 that calls
-- deferForeignPtrAvailability
mkDeferredByteString :: ForeignPtr Word8 -> Int -> IO ByteString
-- | Low-level bytestring builder using continuation parsing.
--
-- bytezap's builder is highly performant. However, one thing it can't do
-- is fail: there is no way to flag an error. If you need failure anyway,
-- you must either
--
--
-- - write an initial assertion pass, followed by an unsafe builder that
-- relies on it, or
-- - build a builder as you assert, then execute it once you're
-- ready
--
--
-- The former is inefficient in situations where the check scales
-- similarly to the build (e.g. both must iterate over the input). And
-- the latter is rather silly, since your builder will be allocating all
-- over the place.
--
-- A naive failable builder might use Either e Int
-- to flag errors. After executing, we check the result: if Right,
-- we resize to the given actual length; if Left, we discard the
-- buffer with the given error. This is fine... but it's an extra
-- allocation, and limits us to Either. A shame.
--
-- Instead, we design a builder that takes a finalizer continuation
-- Int# -> ByteString, which is passed the
-- final offset. The builder calls this as it finishes, wrapping it as
-- needed (or leaving as ByteString for a non-failable builder).
-- The runner is expected to pass a continuation to perform any buffer
-- reallocation necessary (if the actual length was less than the max
-- length), and return a ByteString, possibly wrapped in e.g.
-- Right.
--
-- This is much harder to use than the regular builder, and they can't be
-- combined (the regular builder permits sequencing, which this can't
-- support). But it fills a gap!
--
-- Unlike the regular builder we stick with IO, because the
-- continuations get weird otherwise.
module Bytezap.PokeCPS
type PokeCPS# r = Addr# -> Int# -> (Int# -> IO ByteString) -> IO r
-- | PokeCPS# newtype wrapper.
--
-- Does not permit a Semigroup instance because pokes do not
-- return offset information.
newtype PokeCPS r
PokeCPS :: PokeCPS# r -> PokeCPS r
[unPokeCPS] :: PokeCPS r -> PokeCPS# r
emptyPokeCPS :: PokeCPS ByteString
full :: ByteArray -> Ptr Word8 -> (Int -> IO r) -> (Word8 -> IO r) -> (Word8 -> IO r) -> Int -> Int -> Int -> IO r
withHexNibbles :: (Word8 -> r) -> Word8 -> Word8 -> (Word8 -> r) -> r
withByteAsHexDigit :: Word8 -> (Word8 -> r) -> (Word8 -> r) -> r
textToByteStringUptoIO :: Text -> IO (Either String ByteString)
-- | Handy typenat utils.
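--
-- For example (with TypeApplications):
--
-- > natValInt @8 == 8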
module Util.TypeNats
natVal'' :: forall n. KnownNat n => Natural
natValInt :: forall n. KnownNat n => Int
-- | Generics for bytezap's struct serializer.
--
-- We can't use my generic-data-functions library, because we're doing
-- more than just basic monoidal composition. But I still want the same
-- pluggable generics, where the user provides the class to use for base
-- cases. So I do that here. However, unlike g-d-f, the class info can't
-- be provided via the user-selected monoid, because you don't select one
-- here. Instead, we take a simple "index" type. It's pretty much the
-- same idea, surprisingly. This way, we can provide a few sensible
-- "versions" like in g-d-f, while primarily designing for DIY.
module Bytezap.Struct.Generic
-- | Class for holding info on the class to use for poking base cases.
--
-- The type is just used to map to class info. It is never instantiated.
-- By packing KnownSizeOf into here, we don't need to enforce a
-- type-level solution! Now it's up to you how you want to track your
-- constant lengths.
--
-- We stay unboxed here because the internals are unboxed, just for
-- convenience. Maybe this is bad, let me know.
class GPokeBase tag where {
-- | The state token of our poker.
type GPokeBaseSt tag;
-- | The type class that provides base case poking.
--
-- The type class should provide a function that looks like
-- gPokeBase.
type GPokeBaseC tag a :: Constraint;
type GPokeBaseLenTF tag :: Type ~> Natural;
}
gPokeBase :: (GPokeBase tag, GPokeBaseC tag a) => a -> Poke# (GPokeBaseSt tag)
class GPoke tag f
gPoke :: GPoke tag f => f p -> Poke# (GPokeBaseSt tag)
instance forall k1 k2 (tag :: k1) (f :: k2 -> GHC.Types.Type) (c :: GHC.Generics.Meta). Bytezap.Struct.Generic.GPoke tag f => Bytezap.Struct.Generic.GPoke tag (GHC.Generics.D1 c f)
instance forall k1 k2 (tag :: k1) (f :: k2 -> GHC.Types.Type) (c :: GHC.Generics.Meta). Bytezap.Struct.Generic.GPoke tag f => Bytezap.Struct.Generic.GPoke tag (GHC.Generics.C1 c f)
instance forall k1 k2 (tag :: k1) (l :: k2 -> GHC.Types.Type) (r :: k2 -> GHC.Types.Type) (lenL :: GHC.Num.Natural.Natural). (Bytezap.Struct.Generic.GPoke tag l, Bytezap.Struct.Generic.GPoke tag r, Bytezap.Struct.Generic.GPokeBase tag, lenL GHC.Types.~ Bytezap.Common.Generic.GTFoldMapCAddition (Bytezap.Struct.Generic.GPokeBaseLenTF tag) l, GHC.TypeNats.KnownNat lenL) => Bytezap.Struct.Generic.GPoke tag (l GHC.Generics.:*: r)
instance forall k1 k2 (tag :: k1) a (c :: GHC.Generics.Meta). (Bytezap.Struct.Generic.GPokeBase tag, Bytezap.Struct.Generic.GPokeBaseC tag a) => Bytezap.Struct.Generic.GPoke tag (GHC.Generics.S1 c (GHC.Generics.Rec0 a))
instance forall k1 k2 (tag :: k1). Bytezap.Struct.Generic.GPoke tag GHC.Generics.U1
-- | Pokes with type-level poke length.
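--
-- A composition sketch (assuming SizeOf Word32 = 4 and SizeOf Word16 = 2,
-- so the total length reduces to 6):
--
-- > p :: PokeKnownLen 6 RealWorld
-- > p = prim (0xDEADBEEF :: Word32) `mappend'` prim (0x0102 :: Word16)
-- >
-- > bs :: ByteString
-- > bs = runPokeKnownLenBS p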
module Bytezap.Poke.KnownLen
newtype PokeKnownLen (len :: Natural) s
PokeKnownLen :: Poke s -> PokeKnownLen (len :: Natural) s
[unPokeKnownLen] :: PokeKnownLen (len :: Natural) s -> Poke s
mappend' :: PokeKnownLen n s -> PokeKnownLen m s -> PokeKnownLen (n + m) s
mempty' :: PokeKnownLen 0 s
runPokeKnownLenBS :: forall n. KnownNat n => PokeKnownLen n RealWorld -> ByteString
prim :: Prim' a => a -> PokeKnownLen (SizeOf a) s
-- | Efficient type-level bytestring parsing via chunking.
--
-- See Bytezap.Struct.TypeLits.Bytes for an explanation of the chunking
-- design.
--
-- On mismatch, the index of the failing byte and its value are returned.
-- (This is over-engineered to be extremely efficient.)
--
-- Type classes take a Natural for tracking the current index in
-- the type-level bytestring. We do this on the type level for
-- performance. Use @0 when calling.
--
-- The parsers take an error wrapper function to enable wrapping the
-- error into any parser with confidence that it won't do extra
-- allocations/wrapping.
--
-- The parsers here either return the unit () or a pretty error.
-- No Fail#.
--
-- TODO check generated Core, assembly
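--
-- A usage sketch (assuming DataKinds and TypeApplications; the error
-- type and magic bytes are chosen here for illustration):
--
-- > data MagicErr = MagicErr Int Word8
-- >
-- > -- parse the 2-byte magic 0x48 0x49, reporting the first mismatching
-- > -- byte on failure
-- > magic :: ParserT st MagicErr ()
-- > magic = parseReifyBytesW64 @0 @'[0x48, 0x49] MagicErr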
module Bytezap.Parser.Struct.TypeLits.Bytes
-- | Parse a type-level bytestring, largest grouping Word64.
class ParseReifyBytesW64 (idx :: Natural) (bs :: [Natural])
parseReifyBytesW64 :: ParseReifyBytesW64 idx bs => (Int -> Word8 -> e) -> ParserT st e ()
-- | Parse a type-level bytestring, largest grouping Word32.
class ParseReifyBytesW32 (idx :: Natural) (bs :: [Natural])
parseReifyBytesW32 :: ParseReifyBytesW32 idx bs => (Int -> Word8 -> e) -> ParserT st e ()
-- | Parse a type-level bytestring, largest grouping Word16.
class ParseReifyBytesW16 (idx :: Natural) (bs :: [Natural])
parseReifyBytesW16 :: ParseReifyBytesW16 idx bs => (Int -> Word8 -> e) -> ParserT st e ()
-- | Parse a type-level bytestring, byte-by-byte.
class ParseReifyBytesW8 (idx :: Natural) (bs :: [Natural])
parseReifyBytesW8 :: ParseReifyBytesW8 idx bs => (Int -> Word8 -> e) -> ParserT st e ()
instance Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW8 idx bs => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW16 idx bs
instance (Data.Type.Byte.ReifyW8 b0, GHC.TypeNats.KnownNat idx, Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW8 (idx GHC.TypeNats.+ 1) bs) => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW8 idx (b0 : bs)
instance Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW8 idx '[]
instance Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW16 idx bs => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW32 idx bs
instance (Data.Type.Byte.ReifyW8 b0, Data.Type.Byte.ReifyW8 b1, GHC.TypeNats.KnownNat idx, Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW16 (idx GHC.TypeNats.+ 2) bs) => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW16 idx (b0 : b1 : bs)
instance Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW32 idx bs => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW64 idx bs
instance (Data.Type.Byte.ReifyW8 b0, Data.Type.Byte.ReifyW8 b1, Data.Type.Byte.ReifyW8 b2, Data.Type.Byte.ReifyW8 b3, GHC.TypeNats.KnownNat idx, Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW32 (idx GHC.TypeNats.+ 4) bs) => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW32 idx (b0 : b1 : b2 : b3 : bs)
instance (Data.Type.Byte.ReifyW8 b0, Data.Type.Byte.ReifyW8 b1, Data.Type.Byte.ReifyW8 b2, Data.Type.Byte.ReifyW8 b3, Data.Type.Byte.ReifyW8 b4, Data.Type.Byte.ReifyW8 b5, Data.Type.Byte.ReifyW8 b6, Data.Type.Byte.ReifyW8 b7, GHC.TypeNats.KnownNat idx, Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW64 (idx GHC.TypeNats.+ 8) bs) => Bytezap.Parser.Struct.TypeLits.Bytes.ParseReifyBytesW64 idx (b0 : b1 : b2 : b3 : b4 : b5 : b6 : b7 : bs)
module Bytezap.Parser.Struct.Generic
class GParseBase tag where {
-- | The state token of the parser.
type GParseBaseSt tag :: ZeroBitType;
type GParseBaseC tag a :: Constraint;
type GParseBaseE tag :: Type;
-- | Defunctionalization symbol for a type family turning Types into
-- Naturals. (Needed as we can't partially apply type families.)
type GParseBaseLenTF tag :: Type ~> Natural;
}
gParseBase :: (GParseBase tag, GParseBaseC tag a) => ParserT (GParseBaseSt tag) (GParseBaseE tag) a
class GParse tag gf
gParse :: GParse tag gf => ParserT (GParseBaseSt tag) (GParseBaseE tag) (gf p)
instance forall k1 k2 (tag :: k1) (gf :: k2 -> GHC.Types.Type) (cd :: GHC.Generics.Meta). Bytezap.Parser.Struct.Generic.GParse tag gf => Bytezap.Parser.Struct.Generic.GParse tag (GHC.Generics.D1 cd gf)
instance forall k1 k2 (tag :: k1) (gf :: k2 -> GHC.Types.Type) (cc :: GHC.Generics.Meta). Bytezap.Parser.Struct.Generic.GParse tag gf => Bytezap.Parser.Struct.Generic.GParse tag (GHC.Generics.C1 cc gf)
instance forall k1 k2 (tag :: k1) (l :: k2 -> GHC.Types.Type) (r :: k2 -> GHC.Types.Type) (lenL :: GHC.Num.Natural.Natural). (Bytezap.Parser.Struct.Generic.GParse tag l, Bytezap.Parser.Struct.Generic.GParse tag r, Bytezap.Parser.Struct.Generic.GParseBase tag, lenL GHC.Types.~ Bytezap.Common.Generic.GTFoldMapCAddition (Bytezap.Parser.Struct.Generic.GParseBaseLenTF tag) l, GHC.TypeNats.KnownNat lenL) => Bytezap.Parser.Struct.Generic.GParse tag (l GHC.Generics.:*: r)
instance forall k1 k2 (tag :: k1) a (c :: GHC.Generics.Meta). (Bytezap.Parser.Struct.Generic.GParseBase tag, Bytezap.Parser.Struct.Generic.GParseBaseC tag a) => Bytezap.Parser.Struct.Generic.GParse tag (GHC.Generics.S1 c (GHC.Generics.Rec0 a))
instance forall k1 k2 (tag :: k1). Bytezap.Parser.Struct.Generic.GParse tag GHC.Generics.U1