-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/
-- | Hackage security library
--
-- The hackage security library provides both server and client utilities
-- for securing the Hackage package server
-- (http://hackage.haskell.org/). It is based on The Update
-- Framework (http://theupdateframework.com/), a set of
-- recommendations developed by security researchers at various
-- universities in the US as well as developers on the Tor project
-- (https://www.torproject.org/).
--
-- The current implementation supports only index signing, thereby
-- enabling untrusted mirrors. It does not yet provide facilities for
-- author package signing.
--
-- The library has two main entry points: Hackage.Security.Client
-- is the main entry point for clients (the typical example being
-- cabal), and Hackage.Security.Server is the main entry
-- point for servers (the typical example being hackage-server).
@package hackage-security
@version 0.5.3.0
-- | Producing human-readable strings
module Hackage.Security.Util.Pretty
-- | Produce a human-readable string
class Pretty a
pretty :: Pretty a => a -> String
-- | Hiding existentials
module Hackage.Security.Util.Some
data Some f
Some :: f a -> Some f
data DictEq a
[DictEq] :: Eq a => DictEq a
-- | Type f satisfies SomeEq f if f a satisfies
-- Eq independent of a
class SomeEq f
someEq :: SomeEq f => DictEq (f a)
data DictShow a
[DictShow] :: Show a => DictShow a
-- | Type f satisfies SomeShow f if f a
-- satisfies Show independent of a
class SomeShow f
someShow :: SomeShow f => DictShow (f a)
data DictPretty a
[DictPretty] :: Pretty a => DictPretty a
-- | Type f satisfies SomePretty f if f a
-- satisfies Pretty independent of a
class SomePretty f
somePretty :: SomePretty f => DictPretty (f a)
typecheckSome :: Typed f => Some f -> Some (TypeOf f) -> Bool
instance Hackage.Security.Util.Some.SomePretty f => Hackage.Security.Util.Pretty.Pretty (Hackage.Security.Util.Some.Some f)
instance Hackage.Security.Util.Some.SomeShow f => GHC.Show.Show (Hackage.Security.Util.Some.Some f)
instance (Hackage.Security.Util.TypedEmbedded.Typed f, Hackage.Security.Util.Some.SomeEq f) => GHC.Classes.Eq (Hackage.Security.Util.Some.Some f)
-- | A more type-safe version of file paths
--
-- This module is intended to replace imports of System.FilePath, and
-- additionally exports thin wrappers around common IO functions. To
-- facilitate importing this module unqualified we also re-export some
-- definitions from System.IO (importing both would likely lead to name
-- clashes).
--
-- Note that this module does not import any other modules from
-- Hackage.Security; everywhere else we use Path instead of FilePath
-- directly.
module Hackage.Security.Util.Path
-- | Paths
--
-- A Path is simply a FilePath with a type-level tag
-- indicating where this path is rooted (relative to the current
-- directory, absolute path, relative to a web domain, whatever). Most
-- operations on Path are just lifted versions of the operations
-- on the underlying FilePath. The tag however allows us to give a
-- lot of operations a more meaningful type. For instance, it does not
-- make sense to append two absolute paths together; instead, we can only
-- append an unrooted path to another path. It also means we avoid bugs
-- where we use one kind of path where we expect another.
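--
-- A small sketch of the intent, using fragment and the </> operator
-- exported below (appending two absolute paths, by contrast, is
-- rejected by the type checker):
--
-- inDocs :: Path Absolute -> Path Absolute
-- inDocs root = root </> fragment "docs" </> fragment "index.html"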
newtype Path a
Path :: FilePath -> Path a
-- | Reinterpret the root of a path
--
-- This literally just changes the type-level tag; use with caution!
castRoot :: Path root -> Path root'
takeDirectory :: Path a -> Path a
takeFileName :: Path a -> String
(<.>) :: Path a -> String -> Path a
splitExtension :: Path a -> (Path a, String)
takeExtension :: Path a -> String
-- | Type-level tag for unrooted paths
--
-- Unrooted paths need a root before they can be interpreted.
data Unrooted
(</>) :: Path a -> Path Unrooted -> Path a
-- | Reinterpret an unrooted path
--
-- This is an alias for castRoot; see comments there.
rootPath :: Path Unrooted -> Path root
-- | Forget a path's root
--
-- This is an alias for castRoot; see comments there.
unrootPath :: Path root -> Path Unrooted
-- | Convert a relative/unrooted Path to a FilePath (using POSIX style
-- directory separators).
--
-- See also toAbsoluteFilePath
toUnrootedFilePath :: Path Unrooted -> FilePath
-- | Convert from a relative/unrooted FilePath (using POSIX style directory
-- separators).
fromUnrootedFilePath :: FilePath -> Path Unrooted
-- | A path fragment (like a single directory or filename)
fragment :: String -> Path Unrooted
joinFragments :: [String] -> Path Unrooted
splitFragments :: Path Unrooted -> [String]
isPathPrefixOf :: Path Unrooted -> Path Unrooted -> Bool
data Relative
data Absolute
data HomeDir
-- | A file system root can be interpreted as an (absolute) FilePath
class FsRoot root
-- | Convert a Path to an absolute FilePath (using native style directory
-- separators).
toAbsoluteFilePath :: FsRoot root => Path root -> IO FilePath
-- | Abstract over a file system root
--
-- see fromFilePath
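--
-- For example, a sketch that interprets a user-supplied FilePath and
-- resolves it to an absolute path:
--
-- resolve :: FilePath -> IO (Path Absolute)
-- resolve = makeAbsolute . fromFilePath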
data FsPath
FsPath :: Path root -> FsPath
toFilePath :: Path Absolute -> FilePath
fromFilePath :: FilePath -> FsPath
makeAbsolute :: FsPath -> IO (Path Absolute)
fromAbsoluteFilePath :: FilePath -> Path Absolute
-- | Wrapper around withFile
withFile :: FsRoot root => Path root -> IOMode -> (Handle -> IO r) -> IO r
-- | Wrapper around openBinaryTempFileWithDefaultPermissions
--
-- NOTE: The caller is responsible for cleaning up the temporary file.
openTempFile' :: FsRoot root => Path root -> String -> IO (Path Absolute, Handle)
readLazyByteString :: FsRoot root => Path root -> IO ByteString
readStrictByteString :: FsRoot root => Path root -> IO ByteString
writeLazyByteString :: FsRoot root => Path root -> ByteString -> IO ()
writeStrictByteString :: FsRoot root => Path root -> ByteString -> IO ()
copyFile :: (FsRoot root, FsRoot root') => Path root -> Path root' -> IO ()
createDirectory :: FsRoot root => Path root -> IO ()
createDirectoryIfMissing :: FsRoot root => Bool -> Path root -> IO ()
removeDirectory :: FsRoot root => Path root -> IO ()
doesFileExist :: FsRoot root => Path root -> IO Bool
doesDirectoryExist :: FsRoot root => Path root -> IO Bool
getModificationTime :: FsRoot root => Path root -> IO UTCTime
removeFile :: FsRoot root => Path root -> IO ()
getTemporaryDirectory :: IO (Path Absolute)
-- | Return the immediate children of a directory
--
-- Filters out "." and "..".
getDirectoryContents :: FsRoot root => Path root -> IO [Path Unrooted]
-- | Recursively traverse a directory structure
--
-- Returns a set of paths relative to the directory specified. The list
-- is lazily constructed, so that directories are only read when
-- required. (This is also essential to ensure that this function does
-- not build the entire result in memory before returning, potentially
-- running out of heap.)
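--
-- For example, a sketch that prints every file beneath a directory
-- (the returned paths are relative to that directory):
--
-- printAll :: Path Absolute -> IO ()
-- printAll dir = do
--   entries <- getRecursiveContents dir
--   mapM_ (putStrLn . toUnrootedFilePath) entries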
getRecursiveContents :: FsRoot root => Path root -> IO [Path Unrooted]
renameFile :: (FsRoot root, FsRoot root') => Path root -> Path root' -> IO ()
getCurrentDirectory :: IO (Path Absolute)
data Tar
tarIndexLookup :: TarIndex -> Path Tar -> Maybe TarIndexEntry
tarAppend :: (FsRoot root, FsRoot root') => Path root -> Path root' -> [Path Tar] -> IO ()
data Web
toURIPath :: FilePath -> Path Web
fromURIPath :: Path Web -> FilePath
uriPath :: URI -> Path Web
modifyUriPath :: URI -> (Path Web -> Path Web) -> URI
-- | See openFile
data IOMode
ReadMode :: IOMode
WriteMode :: IOMode
AppendMode :: IOMode
ReadWriteMode :: IOMode
-- | Three kinds of buffering are supported: line-buffering,
-- block-buffering or no-buffering. These modes have the following
-- effects. For output, items are written out, or flushed, from
-- the internal buffer according to the buffer mode:
--
--
-- - line-buffering: the entire output buffer is flushed
-- whenever a newline is output, the buffer overflows, a hFlush is
-- issued, or the handle is closed.
-- - block-buffering: the entire buffer is written out whenever
-- it overflows, a hFlush is issued, or the handle is closed.
-- - no-buffering: output is written immediately, and never
-- stored in the buffer.
--
--
-- An implementation is free to flush the buffer more frequently, but not
-- less frequently, than specified above. The output buffer is emptied as
-- soon as it has been written out.
--
-- Similarly, input occurs according to the buffer mode for the handle:
--
--
-- - line-buffering: when the buffer for the handle is not
-- empty, the next item is obtained from the buffer; otherwise, when the
-- buffer is empty, characters up to and including the next newline
-- character are read into the buffer. No characters are available until
-- the newline character is available or the buffer is full.
-- - block-buffering: when the buffer for the handle becomes
-- empty, the next block of data is read into the buffer.
-- - no-buffering: the next input item is read and returned. The
-- hLookAhead operation implies that even a no-buffered handle may
-- require a one-character buffer.
--
--
-- The default buffering mode when a handle is opened is
-- implementation-dependent and may depend on the file system object
-- which is attached to that handle. For most implementations, physical
-- files will normally be block-buffered and terminals will normally be
-- line-buffered.
data BufferMode
-- | buffering is disabled if possible.
NoBuffering :: BufferMode
-- | line-buffering should be enabled if possible.
LineBuffering :: BufferMode
-- | block-buffering should be enabled if possible. The size of the buffer
-- is n items if the argument is Just n and is
-- otherwise implementation-dependent.
BlockBuffering :: Maybe Int -> BufferMode
-- | Haskell defines operations to read and write characters from and to
-- files, represented by values of type Handle. Each value of
-- this type is a handle: a record used by the Haskell run-time
-- system to manage I/O with file system objects. A handle has at
-- least the following properties:
--
--
-- - whether it manages input or output or both;
-- - whether it is open, closed or
-- semi-closed;
-- - whether the object is seekable;
-- - whether buffering is disabled, or enabled on a line or block
-- basis;
-- - a buffer (whose length may be zero).
--
--
-- Most handles will also have a current I/O position indicating where
-- the next input or output operation will occur. A handle is
-- readable if it manages only input or both input and output;
-- likewise, it is writable if it manages only output or both
-- input and output. A handle is open when first allocated. Once
-- it is closed it can no longer be used for either input or output,
-- though an implementation cannot re-use its storage while references
-- remain to it. Handles are in the Show and Eq classes.
-- The string produced by showing a handle is system dependent; it should
-- include enough information to identify the handle for debugging. A
-- handle is equal according to == only to itself; no attempt is
-- made to compare the internal state of different handles for equality.
data Handle
-- | A mode that determines the effect of hSeek hdl mode
-- i.
data SeekMode
-- | the position of hdl is set to i.
AbsoluteSeek :: SeekMode
-- | the position of hdl is set to offset i from the
-- current position.
RelativeSeek :: SeekMode
-- | the position of hdl is set to offset i from the end
-- of the file.
SeekFromEnd :: SeekMode
-- | Computation hSetBuffering hdl mode sets the mode of
-- buffering for handle hdl on subsequent reads and writes.
--
-- If the buffer mode is changed from BlockBuffering or
-- LineBuffering to NoBuffering, then
--
--
-- - if hdl is writable, the buffer is flushed as for
-- hFlush;
-- - if hdl is not writable, the contents of the buffer is
-- discarded.
--
--
-- This operation may fail with:
--
--
-- - isPermissionError if the handle has already been used for
-- reading or writing and the implementation does not allow the buffering
-- mode to be changed.
--
hSetBuffering :: Handle -> BufferMode -> IO ()
-- | Computation hClose hdl makes handle hdl
-- closed. Before the computation finishes, if hdl is writable
-- its buffer is flushed as for hFlush. Performing hClose
-- on a handle that has already been closed has no effect; doing so is
-- not an error. All other operations on a closed handle will fail. If
-- hClose fails for any reason, any further operations (apart from
-- hClose) on the handle will still fail as if hdl had
-- been successfully closed.
hClose :: Handle -> IO ()
-- | For a handle hdl which is attached to a physical file,
-- hFileSize hdl returns the size of that file in 8-bit
-- bytes.
hFileSize :: Handle -> IO Integer
-- | Computation hSeek hdl mode i sets the position of
-- handle hdl depending on mode. The offset i
-- is given in terms of 8-bit bytes.
--
-- If hdl is block- or line-buffered, then seeking to a position
-- which is not in the current buffer will first cause any items in the
-- output buffer to be written to the device, and then cause the input
-- buffer to be discarded. Some handles may not be seekable (see
-- hIsSeekable), or only support a subset of the possible
-- positioning operations (for instance, it may only be possible to seek
-- to the end of a tape, or to a positive offset from the beginning or
-- current position). It is not possible to set a negative I/O position,
-- or for a physical file, an I/O position beyond the current
-- end-of-file.
--
-- This operation may fail with:
--
--
-- - isIllegalOperationError if the Handle is not seekable, or
-- does not support the requested seek mode.
-- - isPermissionError if a system resource limit would be
-- exceeded.
--
hSeek :: Handle -> SeekMode -> Integer -> IO ()
instance GHC.Classes.Ord (Hackage.Security.Util.Path.Path a)
instance GHC.Classes.Eq (Hackage.Security.Util.Path.Path a)
instance GHC.Show.Show (Hackage.Security.Util.Path.Path a)
instance Hackage.Security.Util.Pretty.Pretty (Hackage.Security.Util.Path.Path Hackage.Security.Util.Path.Tar)
instance Hackage.Security.Util.Path.FsRoot Hackage.Security.Util.Path.Relative
instance Hackage.Security.Util.Path.FsRoot Hackage.Security.Util.Path.Absolute
instance Hackage.Security.Util.Path.FsRoot Hackage.Security.Util.Path.HomeDir
instance Hackage.Security.Util.Pretty.Pretty (Hackage.Security.Util.Path.Path Hackage.Security.Util.Path.HomeDir)
instance Hackage.Security.Util.Pretty.Pretty (Hackage.Security.Util.Path.Path Hackage.Security.Util.Path.Absolute)
instance Hackage.Security.Util.Pretty.Pretty (Hackage.Security.Util.Path.Path Hackage.Security.Util.Path.Relative)
instance Hackage.Security.Util.Pretty.Pretty (Hackage.Security.Util.Path.Path Hackage.Security.Util.Path.Unrooted)
-- | Some very simple lens definitions (to avoid further dependencies)
--
-- Intended to be double imported
--
--
-- import Hackage.Security.Util.Lens (Lens)
-- import qualified Hackage.Security.Util.Lens as Lens
--
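-- A minimal usage sketch, assuming that import scheme (the _1 lens is
-- defined locally for illustration; it is not an export of this
-- module):
--
-- _1 :: Lens.Lens' (a, b) a
-- _1 f (x, y) = (\x' -> (x', y)) <$> f x
--
-- getFirst :: (a, b) -> a
-- getFirst = Lens.get _1
--
-- setFirst :: a -> (a, b) -> (a, b)
-- setFirst = Lens.set _1
--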
module Hackage.Security.Util.Lens
-- | Polymorphic lens
type Lens s t a b = forall f. Functor f => (a -> f b) -> s -> f t
-- | Monomorphic lens
type Lens' s a = Lens s s a a
get :: Lens' s a -> s -> a
modify :: Lens s t a b -> (a -> b) -> s -> t
set :: Lens s t a b -> b -> s -> t
lookupM :: forall a b. (Eq a, Monoid b) => a -> Lens' [(a, b)] b
module Hackage.Security.Util.IO
getFileSize :: (Num a, FsRoot root) => Path root -> IO a
handleDoesNotExist :: IO a -> IO (Maybe a)
-- | Attempt to create a filesystem lock in the specified directory.
--
-- This will use OS-specific file locking primitives:
-- GHC.IO.Handle.Lock with base-4.10 and later, or a shim for
-- earlier base versions.
--
-- Throws an exception if the lock is already present.
--
-- May fall back to locking via creating a directory: given a file
-- /path/to, we do this by attempting to create the directory
-- /path/to/hackage-security-lock, and deleting the directory again
-- afterwards. Creating a directory that already exists will throw an
-- exception on most OSs (certainly Linux, OSX and Windows) and is a
-- reasonably common way to implement a lock file.
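--
-- The directory-based fallback amounts to something like the following
-- sketch (not the actual implementation; it uses Control.Exception and
-- System.Directory/System.FilePath imported qualified as Dir and FP):
--
-- fallbackLock :: FilePath -> IO a -> IO a
-- fallbackLock dir act =
--     bracket_ (Dir.createDirectory lockDir)
--              (Dir.removeDirectory lockDir)
--              act
--   where
--     lockDir = dir FP.</> "hackage-security-lock"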
withDirLock :: Path Absolute -> IO a -> IO a
timedIO :: String -> IO a -> IO a
-- | Checked exceptions
module Hackage.Security.Util.Checked
-- | Checked exceptions
class Throws e
unthrow :: forall a e proxy. proxy e -> (Throws e => a) -> a
-- | Throw a checked exception
throwChecked :: (Exception e, Throws e) => e -> IO a
-- | Catch a checked exception
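--
-- A small usage sketch (readConfig and the file name are made up for
-- illustration; checkIO gives the action its Throws IOException
-- constraint):
--
-- readConfig :: Throws IOException => IO String
-- readConfig = checkIO (readFile "client.config")
--
-- loadConfigOrDefault :: IO String
-- loadConfigOrDefault = catchChecked readConfig handler
--   where
--     handler :: IOException -> IO String
--     handler _ = return "default configuration"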
catchChecked :: forall a e. Exception e => (Throws e => IO a) -> (e -> IO a) -> IO a
-- | catchChecked with the arguments reversed
handleChecked :: Exception e => (e -> IO a) -> (Throws e => IO a) -> IO a
-- | Like try, but for checked exceptions
tryChecked :: Exception e => (Throws e => IO a) -> IO (Either e a)
-- | Rethrow IO exceptions as checked exceptions
checkIO :: Throws IOException => IO a -> IO a
-- | Throw an unchecked exception
--
-- This is just an alias for throw, but makes it evident that
-- this is a very intentional use of an unchecked exception.
throwUnchecked :: Exception e => e -> IO a
-- | Variation on throwUnchecked for internal errors
internalError :: String -> IO a
instance GHC.Show.Show Hackage.Security.Util.Checked.SyncException
instance Hackage.Security.Util.Checked.Throws (Hackage.Security.Util.Checked.Catch e)
instance GHC.Exception.Type.Exception Hackage.Security.Util.Checked.SyncException
module Hackage.Security.Client.Verify
-- | Verification monad
--
-- The verification monad is similar to ResourceT in intent, in
-- that we can register handlers to be run to release resources. Unlike
-- ResourceT, however, we maintain _two_ handlers: a cleanup
-- handler which is run whether or not verification succeeds, and a
-- finalisation handler which is run only if verification succeeds.
--
--
-- - Cleanup handlers are registered using acquire, and are
-- guaranteed to run just before the computation terminates (after the
-- finalisation handler).
-- - The finalisation handlers are run only when verification succeeds,
-- and can be registered with ifVerified. Finalisation can be used
-- for instance to update the local cache (which should only happen if
-- verification is successful).
--
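-- A small sketch of typical use (downloadTo is a hypothetical download
-- action, not part of this module):
--
-- fetch :: Path Absolute -> Verify ()
-- fetch cached = do
--   (tmp, h) <- openTempFile (takeDirectory cached) "download"
--   liftIO $ downloadTo h >> hClose h
--   ifVerified $ copyFile tmp cached
--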
data Verify a
-- | Run an action in the Verify monad
runVerify :: (Finaliser -> Finaliser) -> Verify a -> IO a
-- | Acquire a resource and register the corresponding cleanup handler
--
-- NOTE: Resource acquisition happens with exceptions masked. If it is
-- important that the resource acquisition can be timed out (or receive
-- other kinds of asynchronous exceptions), you will need to use an
-- interruptible operation. See
-- http://www.well-typed.com/blog/2014/08/asynchronous-exceptions/
-- for details.
acquire :: IO a -> (a -> IO ()) -> Verify a
-- | Register an action to be run only if verification succeeds
ifVerified :: IO () -> Verify ()
-- | Create a short-lived temporary file
--
-- Creates the directory where the temp file should live if it does not
-- exist.
openTempFile :: FsRoot root => Path root -> String -> Verify (Path Absolute, Handle)
-- | Lift a computation from the IO monad.
liftIO :: MonadIO m => IO a -> m a
instance Control.Monad.IO.Class.MonadIO Hackage.Security.Client.Verify.Verify
instance GHC.Base.Monad Hackage.Security.Client.Verify.Verify
instance GHC.Base.Applicative Hackage.Security.Client.Verify.Verify
instance GHC.Base.Functor Hackage.Security.Client.Verify.Verify
module Hackage.Security.Client.Formats
data FormatUn
data FormatGz
-- | Format is a singleton type (reflection type to term level)
--
-- NOTE: In the future we might add further compression formats.
data Format :: * -> *
[FUn] :: Format FormatUn
[FGz] :: Format FormatGz
-- | Available formats
--
-- Rather than having a general list here, we enumerate all
-- possibilities. This means we are very precise about what we expect,
-- and we avoid any runtime errors about unexpected format definitions.
--
-- NOTE: If we add additional cases here (for dealing with additional
-- formats) all calls to error "inaccessible" need to be
-- reevaluated.
data Formats :: * -> * -> *
[FsNone] :: Formats () a
[FsUn] :: a -> Formats (FormatUn :- ()) a
[FsGz] :: a -> Formats (FormatGz :- ()) a
[FsUnGz] :: a -> a -> Formats (FormatUn :- (FormatGz :- ())) a
-- | HasFormat fs f is a proof that f is a key in
-- fs.
--
-- See formatsMember and formatsLookup for typical usage.
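--
-- For example (a sketch): look up the GZip entry of a Formats value,
-- if one is present:
--
-- lookupGz :: Formats fs a -> Maybe a
-- lookupGz fs = (`formatsLookup` fs) <$> formatsMember FGz fs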
data HasFormat :: * -> * -> *
[HFZ] :: Format f -> HasFormat (f :- fs) f
[HFS] :: HasFormat fs f -> HasFormat (f' :- fs) f
hasFormatAbsurd :: HasFormat () f -> a
hasFormatGet :: HasFormat fs f -> Format f
formatsMap :: (forall f. Format f -> a -> b) -> Formats fs a -> Formats fs b
formatsMember :: Format f -> Formats fs a -> Maybe (HasFormat fs f)
formatsLookup :: HasFormat fs f -> Formats fs a -> a
instance GHC.Show.Show (Hackage.Security.Client.Formats.Format f)
instance GHC.Classes.Eq (Hackage.Security.Client.Formats.Format f)
instance GHC.Classes.Eq a => GHC.Classes.Eq (Hackage.Security.Client.Formats.Formats fs a)
instance GHC.Show.Show a => GHC.Show.Show (Hackage.Security.Client.Formats.Formats fs a)
instance GHC.Classes.Eq (Hackage.Security.Client.Formats.HasFormat fs f)
instance GHC.Show.Show (Hackage.Security.Client.Formats.HasFormat fs f)
instance GHC.Base.Functor (Hackage.Security.Client.Formats.Formats fs)
instance Hackage.Security.Util.TypedEmbedded.Unify Hackage.Security.Client.Formats.Format
-- | Minimal implementation of Canonical JSON.
--
-- http://wiki.laptop.org/go/Canonical_JSON
--
-- A "canonical JSON" format is provided in order to obtain meaningful
-- and repeatable hashes of JSON-encoded data. Canonical JSON is parsable
-- with any full JSON parser, but security-conscious applications will
-- want to verify that input is in canonical form before authenticating
-- any hash or signature on that input.
--
-- This implementation is derived from the json parser from the json
-- package, with simplifications to meet the Canonical JSON grammar.
--
-- TODO: Known bugs/limitations:
--
--
-- - Decoding/encoding Unicode code-points beyond U+00ff is
-- currently broken
--
module Text.JSON.Canonical
data JSValue
JSNull :: JSValue
JSBool :: !Bool -> JSValue
JSNum :: !Int54 -> JSValue
JSString :: String -> JSValue
JSArray :: [JSValue] -> JSValue
JSObject :: [(String, JSValue)] -> JSValue
-- | 54-bit integer values
--
-- JavaScript can only safely represent numbers between -(2^53 -
-- 1) and 2^53 - 1.
--
-- TODO: Although we introduce the type here, we don't actually do any
-- bounds checking and just inherit all type class instances from Int64.
-- We should probably define fromInteger to do bounds checking,
-- give different instances for type classes such as Bounded and
-- FiniteBits, etc.
data Int54
-- | Parse a canonical JSON format string as a JSON value. The input string
-- does not have to be in canonical form, just in the "canonical JSON"
-- format.
--
-- Use renderCanonicalJSON to convert into canonical form.
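--
-- For example, a sketch that parses a document and re-renders it in
-- canonical form:
--
-- canonicalise :: ByteString -> Either String ByteString
-- canonicalise bs = renderCanonicalJSON <$> parseCanonicalJSON bs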
parseCanonicalJSON :: ByteString -> Either String JSValue
-- | Render a JSON value in canonical form. This rendered form is canonical
-- and so allows repeatable hashes.
--
-- For pretty printing, see prettyCanonicalJSON.
--
-- NB: Canonical JSON's string escaping rules deviate from RFC 7159 JSON
-- which requires
--
-- "All Unicode characters may be placed within the quotation marks,
-- except for the characters that must be escaped: quotation mark,
-- reverse solidus, and the control characters (U+0000 through
-- U+001F)."
--
-- The current specification of Canonical JSON, by contrast, explicitly
-- requires violating this by escaping only the quotation mark and the
-- reverse solidus. This, however, contradicts Canonical JSON's statement
-- that "Canonical JSON is parsable with any full JSON parser".
--
-- Consequently, Canonical JSON is not a proper subset of RFC 7159.
renderCanonicalJSON :: JSValue -> ByteString
-- | Render a JSON value in a reasonable human-readable form. This rendered
-- form is not the canonical form used for repeatable hashes, use
-- renderCanonicalJSON for that.
prettyCanonicalJSON :: JSValue -> String
instance GHC.Classes.Ord Text.JSON.Canonical.JSValue
instance GHC.Classes.Eq Text.JSON.Canonical.JSValue
instance GHC.Read.Read Text.JSON.Canonical.JSValue
instance GHC.Show.Show Text.JSON.Canonical.JSValue
instance Text.Printf.PrintfArg Text.JSON.Canonical.Int54
instance Foreign.Storable.Storable Text.JSON.Canonical.Int54
instance Data.Bits.Bits Text.JSON.Canonical.Int54
instance Data.Bits.FiniteBits Text.JSON.Canonical.Int54
instance GHC.Arr.Ix Text.JSON.Canonical.Int54
instance GHC.Real.Real Text.JSON.Canonical.Int54
instance GHC.Classes.Ord Text.JSON.Canonical.Int54
instance GHC.Num.Num Text.JSON.Canonical.Int54
instance Data.Data.Data Text.JSON.Canonical.Int54
instance GHC.Real.Integral Text.JSON.Canonical.Int54
instance GHC.Classes.Eq Text.JSON.Canonical.Int54
instance GHC.Enum.Enum Text.JSON.Canonical.Int54
instance GHC.Enum.Bounded Text.JSON.Canonical.Int54
instance GHC.Show.Show Text.JSON.Canonical.Int54
instance GHC.Read.Read Text.JSON.Canonical.Int54
module Hackage.Security.Key.Env
-- | A key environment is a mapping from key IDs to the corresponding keys.
--
-- It should satisfy the invariant that these key IDs actually match the
-- keys; see checkKeyEnvInvariant.
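--
-- For example (a sketch, with this module imported qualified as
-- KeyEnv and isJust from Data.Maybe):
--
-- isKnownKey :: KeyId -> KeyEnv -> Bool
-- isKnownKey kid env = isJust (KeyEnv.lookup kid env)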
data KeyEnv
keyEnvMap :: KeyEnv -> Map KeyId (Some PublicKey)
fromPublicKeys :: [Some PublicKey] -> KeyEnv
fromKeys :: [Some Key] -> KeyEnv
empty :: KeyEnv
null :: KeyEnv -> Bool
insert :: Some PublicKey -> KeyEnv -> KeyEnv
lookup :: KeyId -> KeyEnv -> Maybe (Some PublicKey)
union :: KeyEnv -> KeyEnv -> KeyEnv
instance GHC.Show.Show Hackage.Security.Key.Env.KeyEnv
instance GHC.Base.Monad m => Hackage.Security.Util.JSON.ToJSON m Hackage.Security.Key.Env.KeyEnv
instance Hackage.Security.Util.JSON.ReportSchemaErrors m => Hackage.Security.Util.JSON.FromJSON m Hackage.Security.Key.Env.KeyEnv
-- | Hackage-specific wrappers around the Util.JSON module
module Hackage.Security.JSON
data DeserializationError
-- | Malformed JSON has syntax errors in the JSON itself (i.e., we cannot
-- even parse it to a JSValue)
DeserializationErrorMalformed :: String -> DeserializationError
-- | Invalid JSON has valid syntax but invalid structure
--
-- The string gives a hint about what we expected instead
DeserializationErrorSchema :: String -> DeserializationError
-- | The JSON file contains a key ID of an unknown key
DeserializationErrorUnknownKey :: KeyId -> DeserializationError
-- | Some verification step failed
DeserializationErrorValidation :: String -> DeserializationError
-- | Wrong file type
--
-- Records actual and expected types.
DeserializationErrorFileType :: String -> String -> DeserializationError
validate :: MonadError DeserializationError m => String -> Bool -> m ()
verifyType :: (ReportSchemaErrors m, MonadError DeserializationError m) => JSValue -> String -> m ()
-- | MonadReader-like monad, specialized to key environments
class (ReportSchemaErrors m, MonadError DeserializationError m) => MonadKeys m
localKeys :: MonadKeys m => (KeyEnv -> KeyEnv) -> m a -> m a
askKeys :: MonadKeys m => m KeyEnv
addKeys :: MonadKeys m => KeyEnv -> m a -> m a
withKeys :: MonadKeys m => KeyEnv -> m a -> m a
lookupKey :: MonadKeys m => KeyId -> m (Some PublicKey)
readKeyAsId :: MonadKeys m => JSValue -> m (Some PublicKey)
data ReadJSON_Keys_Layout a
data ReadJSON_Keys_NoLayout a
data ReadJSON_NoKeys_NoLayout a
runReadJSON_Keys_Layout :: KeyEnv -> RepoLayout -> ReadJSON_Keys_Layout a -> Either DeserializationError a
runReadJSON_Keys_NoLayout :: KeyEnv -> ReadJSON_Keys_NoLayout a -> Either DeserializationError a
runReadJSON_NoKeys_NoLayout :: ReadJSON_NoKeys_NoLayout a -> Either DeserializationError a
parseJSON_Keys_Layout :: FromJSON ReadJSON_Keys_Layout a => KeyEnv -> RepoLayout -> ByteString -> Either DeserializationError a
parseJSON_Keys_NoLayout :: FromJSON ReadJSON_Keys_NoLayout a => KeyEnv -> ByteString -> Either DeserializationError a
parseJSON_NoKeys_NoLayout :: FromJSON ReadJSON_NoKeys_NoLayout a => ByteString -> Either DeserializationError a
readJSON_Keys_Layout :: (FsRoot root, FromJSON ReadJSON_Keys_Layout a) => KeyEnv -> RepoLayout -> Path root -> IO (Either DeserializationError a)
readJSON_Keys_NoLayout :: (FsRoot root, FromJSON ReadJSON_Keys_NoLayout a) => KeyEnv -> Path root -> IO (Either DeserializationError a)
readJSON_NoKeys_NoLayout :: (FsRoot root, FromJSON ReadJSON_NoKeys_NoLayout a) => Path root -> IO (Either DeserializationError a)
data WriteJSON a
runWriteJSON :: RepoLayout -> WriteJSON a -> a
-- | Render to canonical JSON format
renderJSON :: ToJSON WriteJSON a => RepoLayout -> a -> ByteString
-- | Variation on renderJSON for files that don't require the repo
-- layout
renderJSON_NoLayout :: ToJSON Identity a => a -> ByteString
writeJSON :: ToJSON WriteJSON a => RepoLayout -> Path Absolute -> a -> IO ()
writeJSON_NoLayout :: ToJSON Identity a => Path Absolute -> a -> IO ()
writeKeyAsId :: Some PublicKey -> JSValue
class ToJSON m a
toJSON :: ToJSON m a => a -> m JSValue
class FromJSON m a
fromJSON :: FromJSON m a => JSValue -> m a
-- | Used in the ToJSON instance for Map
class ToObjectKey m a
toObjectKey :: ToObjectKey m a => a -> m String
-- | Used in the FromJSON instance for Map
class FromObjectKey m a
fromObjectKey :: FromObjectKey m a => String -> m (Maybe a)
-- | Monads in which we can report schema errors
class (Applicative m, Monad m) => ReportSchemaErrors m
expected :: ReportSchemaErrors m => Expected -> Maybe Got -> m a
type Expected = String
type Got = String
expected' :: ReportSchemaErrors m => Expected -> JSValue -> m a
fromJSObject :: ReportSchemaErrors m => JSValue -> m [(String, JSValue)]
-- | Extract a field from a JSON object
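--
-- For example, a sketch of serialization instances for a hypothetical
-- Person type (relying on the library's instances for String and
-- Int54):
--
-- data Person = Person { name :: String, age :: Int54 }
--
-- instance ReportSchemaErrors m => FromJSON m Person where
--   fromJSON enc = Person <$> fromJSField enc "name"
--                         <*> fromJSField enc "age"
--
-- instance Monad m => ToJSON m Person where
--   toJSON p = mkObject [ ("name", toJSON (name p))
--                       , ("age",  toJSON (age p)) ]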
fromJSField :: (ReportSchemaErrors m, FromJSON m a) => JSValue -> String -> m a
fromJSOptField :: (ReportSchemaErrors m, FromJSON m a) => JSValue -> String -> m (Maybe a)
mkObject :: forall m. Monad m => [(String, m JSValue)] -> m JSValue
data JSValue
JSNull :: JSValue
JSBool :: !Bool -> JSValue
JSNum :: !Int54 -> JSValue
JSString :: String -> JSValue
JSArray :: [JSValue] -> JSValue
JSObject :: [(String, JSValue)] -> JSValue
-- | 54-bit integer values
--
-- JavaScript can only safely represent numbers between -(2^53 -
-- 1) and 2^53 - 1.
--
-- TODO: Although we introduce the type here, we don't actually do any
-- bounds checking and just inherit all type class instances from Int64.
-- We should probably define fromInteger to do bounds checking,
-- give different instances for type classes such as Bounded and
-- FiniteBits, etc.
data Int54
instance Control.Monad.Reader.Class.MonadReader Hackage.Security.TUF.Layout.Repo.RepoLayout Hackage.Security.JSON.WriteJSON
instance GHC.Base.Monad Hackage.Security.JSON.WriteJSON
instance GHC.Base.Applicative Hackage.Security.JSON.WriteJSON
instance GHC.Base.Functor Hackage.Security.JSON.WriteJSON
instance Control.Monad.Error.Class.MonadError Hackage.Security.JSON.DeserializationError Hackage.Security.JSON.ReadJSON_NoKeys_NoLayout
instance GHC.Base.Monad Hackage.Security.JSON.ReadJSON_NoKeys_NoLayout
instance GHC.Base.Applicative Hackage.Security.JSON.ReadJSON_NoKeys_NoLayout
instance GHC.Base.Functor Hackage.Security.JSON.ReadJSON_NoKeys_NoLayout
instance Control.Monad.Error.Class.MonadError Hackage.Security.JSON.DeserializationError Hackage.Security.JSON.ReadJSON_Keys_NoLayout
instance GHC.Base.Monad Hackage.Security.JSON.ReadJSON_Keys_NoLayout
instance GHC.Base.Applicative Hackage.Security.JSON.ReadJSON_Keys_NoLayout
instance GHC.Base.Functor Hackage.Security.JSON.ReadJSON_Keys_NoLayout
instance Control.Monad.Error.Class.MonadError Hackage.Security.JSON.DeserializationError Hackage.Security.JSON.ReadJSON_Keys_Layout
instance GHC.Base.Monad Hackage.Security.JSON.ReadJSON_Keys_Layout
instance GHC.Base.Applicative Hackage.Security.JSON.ReadJSON_Keys_Layout
instance GHC.Base.Functor Hackage.Security.JSON.ReadJSON_Keys_Layout
instance GHC.Show.Show Hackage.Security.JSON.DeserializationError
instance Hackage.Security.Util.JSON.ReportSchemaErrors Hackage.Security.JSON.ReadJSON_NoKeys_NoLayout
instance Hackage.Security.Util.JSON.ReportSchemaErrors Hackage.Security.JSON.ReadJSON_Keys_NoLayout
instance Hackage.Security.JSON.MonadKeys Hackage.Security.JSON.ReadJSON_Keys_NoLayout
instance Hackage.Security.Util.JSON.ReportSchemaErrors Hackage.Security.JSON.ReadJSON_Keys_Layout
instance Control.Monad.Reader.Class.MonadReader Hackage.Security.TUF.Layout.Repo.RepoLayout Hackage.Security.JSON.ReadJSON_Keys_Layout
instance Hackage.Security.JSON.MonadKeys Hackage.Security.JSON.ReadJSON_Keys_Layout
instance GHC.Exception.Type.Exception Hackage.Security.JSON.DeserializationError
instance Hackage.Security.Util.Pretty.Pretty Hackage.Security.JSON.DeserializationError
-- | Information about files
--
-- Intended to be double imported
--
--
-- import Hackage.Security.TUF.FileMap (FileMap)
-- import qualified Hackage.Security.TUF.FileMap as FileMap
--
module Hackage.Security.TUF.FileMap
-- | Mapping from paths to file info
--
-- File maps are used in target files; the paths are relative to the
-- location of the target files containing the file map.
data FileMap
-- | Entries in FileMap either talk about the repository or the
-- index
data TargetPath
TargetPathRepo :: RepoPath -> TargetPath
TargetPathIndex :: IndexPath -> TargetPath
empty :: FileMap
lookup :: TargetPath -> FileMap -> Maybe FileInfo
(!) :: FileMap -> TargetPath -> FileInfo
insert :: TargetPath -> FileInfo -> FileMap -> FileMap
fromList :: [(TargetPath, FileInfo)] -> FileMap
lookupM :: Monad m => FileMap -> TargetPath -> m FileInfo
data FileChange
-- | File got added or modified; we record the new file info
FileChanged :: FileInfo -> FileChange
-- | File got deleted
FileDeleted :: FileChange
fileMapChanges :: FileMap -> FileMap -> Map TargetPath FileChange
instance GHC.Show.Show Hackage.Security.TUF.FileMap.FileChange
instance GHC.Show.Show Hackage.Security.TUF.FileMap.FileMap
instance GHC.Classes.Ord Hackage.Security.TUF.FileMap.TargetPath
instance GHC.Classes.Eq Hackage.Security.TUF.FileMap.TargetPath
instance GHC.Show.Show Hackage.Security.TUF.FileMap.TargetPath
instance GHC.Base.Monad m => Hackage.Security.Util.JSON.ToJSON m Hackage.Security.TUF.FileMap.FileMap
instance Hackage.Security.Util.JSON.ReportSchemaErrors m => Hackage.Security.Util.JSON.FromJSON m Hackage.Security.TUF.FileMap.FileMap
instance Hackage.Security.Util.Pretty.Pretty Hackage.Security.TUF.FileMap.TargetPath
instance GHC.Base.Monad m => Hackage.Security.Util.JSON.ToObjectKey m Hackage.Security.TUF.FileMap.TargetPath
instance Hackage.Security.Util.JSON.ReportSchemaErrors m => Hackage.Security.Util.JSON.FromObjectKey m Hackage.Security.TUF.FileMap.TargetPath
module Hackage.Security.Trusted
-- | A reference to a value of type a.
data StaticPtr a
type VerificationHistory = [Either RootUpdated VerificationError]
-- | Root metadata updated (as part of the normal update process)
data RootUpdated
RootUpdated :: RootUpdated
-- | Errors thrown during role validation
data VerificationError
-- | Not enough signatures signed with the appropriate keys
VerificationErrorSignatures :: TargetPath -> VerificationError
-- | The file is expired
VerificationErrorExpired :: TargetPath -> VerificationError
-- | The file version is less than the previous version
VerificationErrorVersion :: TargetPath -> VerificationError
-- | File information mismatch
VerificationErrorFileInfo :: TargetPath -> VerificationError
-- | We tried to look up file information about a particular target file,
-- but the information wasn't in the corresponding targets.json
-- file.
VerificationErrorUnknownTarget :: TargetPath -> VerificationError
-- | The metadata for the specified target is missing a SHA256
VerificationErrorMissingSHA256 :: TargetPath -> VerificationError
-- | Some verification errors materialize as deserialization errors
--
-- For example: if we try to deserialize a timestamp file but the
-- timestamp key has been rolled over, deserialization of the file will
-- fail with DeserializationErrorUnknownKey.
VerificationErrorDeserialization :: TargetPath -> DeserializationError -> VerificationError
-- | The spec stipulates that if a verification error occurs during the
-- check for updates, we must download new root information and start
-- over. However, we limit how often we attempt this.
--
-- We record all verification errors that occurred before we gave up.
VerificationErrorLoop :: VerificationHistory -> VerificationError
data SignaturesVerified a
-- | Trusted values
--
-- Trusted values originate in only two ways:
--
--
-- - Anything that is statically known is trusted
-- (trustStatic)
-- - If we have "dynamic" data we can trust it once we have verified
-- the signatures (trustSigned).
--
--
-- NOTE: Trusted is NOT a functor. If it were, we could define
--
--
-- trustAnything :: a -> Trusted a
-- trustAnything a = fmap (const a) (trustStatic (static ()))
--
--
-- Consequently, it is neither a monad nor a comonad. However, we _can_
-- apply trusted functions to trusted arguments (trustApply).
--
-- The DeclareTrusted constructor is exported, but any use of it
-- should be verified.
data Trusted a
trustStatic :: StaticPtr a -> Trusted a
trustVerified :: SignaturesVerified a -> Trusted a
-- | Equivalent of <*>
--
-- Trusted isn't quite applicative (no pure, not a functor), but it is
-- somehow Applicative-like: we have the equivalent of <*>
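--
-- For example (a sketch, requiring the StaticPointers extension):
--
-- trustedLength :: Trusted String -> Trusted Int
-- trustedLength str =
--   trustStatic (static (length :: String -> Int)) `trustApply` str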
trustApply :: Trusted (a -> b) -> Trusted a -> Trusted b
-- | Trust all elements of some trusted (traversable) container
--
-- If we have, say, a trusted list of values, we should be able to get a
-- list of trusted values out of it.
--
--
-- trustElems :: Trusted [a] -> [Trusted a]
--
--
-- NOTE. It might appear that the more natural primitive to offer is a
-- sequenceA-like operator such as
--
--
-- trustSeq :: Applicative f => Trusted (f a) -> f (Trusted a)
--
--
-- However, this is unsound. To see this, consider that ((->)
-- a) is Applicative (it's the reader monad); hence, we can
-- instantiate trustSeq at
--
--
-- trustSeq :: Trusted (a -> a) -> a -> Trusted a
--
--
-- and by passing trustStatic (static id) make Trusted a
-- functor, which we certainly don't want to do (see comments for
-- Trusted).
--
-- So why is it okay when we insist on Traversable rather than
-- Applicative? To see this, it's instructive to consider how we
-- might make a ((->) a) an instance of Traversable.
-- If we define the domain of enumerable types as
--
--
-- class Eq a => Enumerable a where
-- enumerate :: [a]
--
--
-- then we can make ((->) r) traversable by
--
--
-- instance Enumerable r => Traversable ((->) r) where
-- sequenceA f = rebuild <$> sequenceA ((\r -> (r,) <$> f r) <$> enumerate)
-- where
-- rebuild :: [(r, a)] -> r -> a
-- rebuild fun arg = fromJust (lookup arg fun)
--
--
-- The idea is that if the domain of a function is enumerable, we can
-- apply the function to each possible input, collect the outputs, and
-- construct a new function by pairing the inputs with the outputs. I.e.,
-- if we had something of type
--
--
-- a -> IO b
--
--
-- and a is enumerable, we just run the IO action on
-- each possible a and collect all bs to get a pure
-- function a -> b. Of course, you probably don't want to be
-- doing that, but the point is that as far as the type system is
-- concerned you could.
--
-- In the context of Trusted, this means that we can derive
--
--
-- enumPure :: Enumerable a => a -> Trusted a
--
--
-- but in a way this makes sense anyway. If a domain is enumerable,
-- it would not be unreasonable to change Enumerable to
--
--
-- class Eq a => Enumerable a where
-- enumerate :: [StaticPtr a]
--
--
-- so we could define enumPure as
--
--
-- enumPure :: Enumerable a => a -> Trusted a
-- enumPure x = trustStatic
-- $ fromJust (find ((== x) . deRefStaticPtr) enumerate)
--
--
-- In other words, we just enumerate the entire domain as trusted values
-- (because we defined them locally) and then return the one that matched
-- the untrusted value.
--
-- The conclusion from all of this is that the types of untrusted input
-- (like the types of the TUF files we download from the server) should
-- probably not be considered enumerable.
trustElems :: Traversable f => Trusted (f a) -> f (Trusted a)
-- | Role verification
--
-- NOTE: We throw an error when the version number _decreases_, but allow
-- it to be the same. This is sufficient: the version number is there so
-- that attackers cannot replay old files. It cannot protect against
-- freeze attacks (that's what the expiry date is for), so "replaying"
-- the same file is not a problem. If an attacker changes the contents of
-- the file but not the version number we have an inconsistent situation,
-- but this is not something we need to worry about: in this case the
-- attacker will need to re-sign the file or otherwise the signature won't
-- match, and if the attacker has compromised the key then he might just
-- as well increase the version number and re-sign.
--
-- NOTE 2: We are not actually verifying the signatures _themselves_ here
-- (we did that when we parsed the JSON). We are merely verifying the
-- provenance of the keys.
verifyRole' :: forall a. HasHeader a => Trusted (RoleSpec a) -> TargetPath -> Maybe FileVersion -> Maybe UTCTime -> Signed a -> Either VerificationError (SignaturesVerified a)
-- | Variation on verifyRole that uses key IDs rather than keys
--
-- This is used during the bootstrap process.
--
-- See http://en.wikipedia.org/wiki/Public_key_fingerprint.
verifyFingerprints :: [KeyId] -> KeyThreshold -> TargetPath -> Signed Root -> Either VerificationError (SignaturesVerified Root)
-- | Apply a static function to a trusted argument
(<$$>) :: StaticPtr (a -> b) -> Trusted a -> Trusted b
class VerifyRole a
verifyRole :: VerifyRole a => Trusted Root -> TargetPath -> Maybe FileVersion -> Maybe UTCTime -> Signed a -> Either VerificationError (SignaturesVerified a)
-- | Variation on knownFileInfoEqual for Trusted
-- FileInfo
trustedFileInfoEqual :: Trusted FileInfo -> Trusted FileInfo -> Bool
instance Hackage.Security.Trusted.VerifyRole Hackage.Security.TUF.Root.Root
instance Hackage.Security.Trusted.VerifyRole Hackage.Security.TUF.Timestamp.Timestamp
instance Hackage.Security.Trusted.VerifyRole Hackage.Security.TUF.Snapshot.Snapshot
instance Hackage.Security.Trusted.VerifyRole Hackage.Security.TUF.Mirrors.Mirrors
-- | Abstract definition of a Repository
--
-- Most clients should only need to import this module if they wish to
-- define their own Repository implementations.
module Hackage.Security.Client.Repository
data Metadata
data Binary
-- | Abstract definition of files we might have to download
--
-- RemoteFile is parametrized by the type of the formats that we
-- can accept from the remote repository, as well as with information on
-- whether this file is metadata or actual binary content.
--
-- NOTE: Haddock lacks GADT support so constructors have only regular
-- comments.
data RemoteFile :: * -> * -> *
[RemoteTimestamp] :: RemoteFile (FormatUn :- ()) Metadata
[RemoteRoot] :: Maybe (Trusted FileInfo) -> RemoteFile (FormatUn :- ()) Metadata
[RemoteSnapshot] :: Trusted FileInfo -> RemoteFile (FormatUn :- ()) Metadata
[RemoteMirrors] :: Trusted FileInfo -> RemoteFile (FormatUn :- ()) Metadata
[RemoteIndex] :: HasFormat fs FormatGz -> Formats fs (Trusted FileInfo) -> RemoteFile fs Binary
[RemotePkgTarGz] :: PackageIdentifier -> Trusted FileInfo -> RemoteFile (FormatGz :- ()) Binary
-- | Files that we might request from the local cache
data CachedFile
-- | Timestamp metadata (timestamp.json)
CachedTimestamp :: CachedFile
-- | Root metadata (root.json)
CachedRoot :: CachedFile
-- | Snapshot metadata (snapshot.json)
CachedSnapshot :: CachedFile
-- | Mirrors list (mirrors.json)
CachedMirrors :: CachedFile
-- | Files that we might request from the index
--
-- The type index tells us the type of the decoded file, if any. For
-- files for which the library does not support decoding this will be
-- (). NOTE: Clients should NOT rely on this type index being
-- (), or they might break if we add support for parsing
-- additional file formats in the future.
--
-- TODO: If we wanted to support legacy Hackage, we should also have a
-- case for the global preferred-versions file. But supporting legacy
-- Hackage will probably require more work anyway.
data IndexFile :: * -> *
[IndexPkgMetadata] :: PackageIdentifier -> IndexFile (Signed Targets)
[IndexPkgCabal] :: PackageIdentifier -> IndexFile ()
[IndexPkgPrefs] :: PackageName -> IndexFile ()
-- | Default format for each file type
--
-- For most file types we don't have a choice; for the index the
-- repository is only required to offer the GZip-compressed format so
-- that is the default.
remoteFileDefaultFormat :: RemoteFile fs typ -> Some (HasFormat fs)
-- | Default file info (see also remoteFileDefaultFormat)
remoteFileDefaultInfo :: RemoteFile fs typ -> Maybe (Trusted FileInfo)
-- | Repository
--
-- This is an abstract representation of a repository. It simply provides
-- a way to download metafiles and target files, without specifying how
-- this is done. For instance, for a local repository this could just be
-- doing a file read, whereas for remote repositories this could be using
-- any kind of HTTP client.
data Repository down
Repository :: (forall fs typ. Throws SomeRemoteError => AttemptNr -> RemoteFile fs typ -> Verify (Some (HasFormat fs), down typ)) -> (CachedFile -> IO (Maybe (Path Absolute))) -> IO (Path Absolute) -> IO () -> (forall a. (Handle -> IO a) -> IO a) -> IO TarIndex -> (IO () -> IO ()) -> (forall a. Maybe [Mirror] -> IO a -> IO a) -> (LogMessage -> IO ()) -> RepoLayout -> IndexLayout -> String -> Repository down
-- | Get a file from the server
--
-- Responsibilities of repGetRemote:
--
--
-- - Download the file from the repository and make it available at a
-- temporary location
-- - Use the provided file length to protect against endless data
-- attacks. (Repositories such as local repositories that are not
-- susceptible to endless data attacks can safely ignore this
-- argument.)
-- - Move the file from its temporary location to its permanent
-- location if verification succeeds.
--
--
-- NOTE: Calls to repGetRemote should _always_ be in the scope of
-- repWithMirror.
[repGetRemote] :: Repository down -> forall fs typ. Throws SomeRemoteError => AttemptNr -> RemoteFile fs typ -> Verify (Some (HasFormat fs), down typ)
-- | Get a cached file (if available)
[repGetCached] :: Repository down -> CachedFile -> IO (Maybe (Path Absolute))
-- | Get the cached root
--
-- This is a separate method only because clients must ALWAYS have root
-- information available.
[repGetCachedRoot] :: Repository down -> IO (Path Absolute)
-- | Clear all cached data
--
-- In particular, this should remove the snapshot and the timestamp. It
-- would also be okay, but not required, to delete the index.
[repClearCache] :: Repository down -> IO ()
-- | Open the tarball for reading
--
-- This function has this shape so that:
--
--
-- - We can read multiple files from the tarball without having to open
-- and close the handle each time
-- - We can close the handle immediately when done.
--
[repWithIndex] :: Repository down -> forall a. (Handle -> IO a) -> IO a
-- | Read the index of the index tarball
[repGetIndexIdx] :: Repository down -> IO TarIndex
-- | Lock the cache (during updates)
[repLockCache] :: Repository down -> IO () -> IO ()
-- | Mirror selection
--
-- The purpose of repWithMirror is to scope mirror selection. The
-- idea is that if we have
--
--
-- repWithMirror mirrorList $
-- someCallback
--
--
-- then the repository may pick a mirror before calling
-- someCallback, catch exceptions thrown by
-- someCallback, and potentially try the callback again with a
-- different mirror.
--
-- The list of mirrors may be Nothing if we haven't yet
-- downloaded the list of mirrors from the repository, or when our cached
-- list of mirrors is invalid. Of course, if we did download it, then the
-- list of mirrors may still be empty. In this case the repository must
-- fall back to its primary download mechanism.
--
-- Mirrors as currently defined (in terms of a "base URL") are inherently
-- an HTTP (or related) concept, so in repository implementations such as
-- the local-repo repWithMirror is probably just an identity
-- operation (see mirrorsUnsupported). Conversely, HTTP
-- implementations of repositories may have other, out-of-band
-- information (for example, coming from a cabal config file) that they
-- may use to influence mirror selection.
[repWithMirror] :: Repository down -> forall a. Maybe [Mirror] -> IO a -> IO a
-- | Logging
[repLog] :: Repository down -> LogMessage -> IO ()
-- | Layout of this repository
[repLayout] :: Repository down -> RepoLayout
-- | Layout of the index
--
-- Since the repository hosts the index, the layout of the index is not
-- independent of the layout of the repository.
[repIndexLayout] :: Repository down -> IndexLayout
-- | Description of the repository (used in the show instance)
[repDescription] :: Repository down -> String
-- | Are we requesting this information because of a previous validation
-- error?
--
-- Clients can take advantage of this to tell caches to revalidate files.
newtype AttemptNr
AttemptNr :: Int -> AttemptNr
-- | Log messages
--
-- We use a RemoteFile rather than a RepoPath here because
-- we might not have a RepoPath for the file that we were trying
-- to download (for example, if the server does not provide an
-- uncompressed tarball, it makes little sense to list the path to
-- that non-existent uncompressed tarball).
data LogMessage
-- | Root information was updated
--
-- This message is issued when the root information is updated as part of
-- the normal check for updates procedure. If the root information is
-- updated because of a verification error, LogVerificationError is
-- issued instead.
LogRootUpdated :: LogMessage
-- | A verification error
--
-- Verification errors can be temporary, and may be resolved later; hence
-- these are just warnings. (Verification errors that cannot be resolved
-- are thrown as exceptions.)
LogVerificationError :: VerificationError -> LogMessage
-- | Download a file from a repository
LogDownloading :: RemoteFile fs typ -> LogMessage
-- | Incrementally updating a file from a repository
LogUpdating :: RemoteFile fs Binary -> LogMessage
-- | Selected a particular mirror
LogSelectedMirror :: MirrorDescription -> LogMessage
-- | Updating a file failed (we will instead download it whole)
LogCannotUpdate :: RemoteFile fs Binary -> UpdateFailure -> LogMessage
-- | We got an exception with a particular mirror (we will try with a
-- different mirror if any are available)
LogMirrorFailed :: MirrorDescription -> SomeException -> LogMessage
-- | Records why we are downloading a file rather than updating it.
data UpdateFailure
-- | Server does not support incremental downloads
UpdateImpossibleUnsupported :: UpdateFailure
-- | We don't have a local copy of the file to update
UpdateImpossibleNoLocalCopy :: UpdateFailure
-- | Update failed twice
--
-- If we attempt an incremental update the first time, and it fails, we
-- let it go round the loop, update local security information, and try
-- again. But if an incremental update then fails _again_, we instead
-- attempt a regular download.
UpdateFailedTwice :: UpdateFailure
-- | Update failed (for example: perhaps the local file got corrupted)
UpdateFailed :: SomeException -> UpdateFailure
-- | Repository-specific exceptions
--
-- For instance, for repositories using HTTP this might correspond to a
-- 404; for local repositories this might correspond to file-not-found,
-- etc.
data SomeRemoteError :: *
[SomeRemoteError] :: Exception e => e -> SomeRemoteError
class DownloadedFile (down :: * -> *)
-- | Verify a downloaded file
downloadedVerify :: DownloadedFile down => down a -> Trusted FileInfo -> IO Bool
-- | Read the file we just downloaded into memory
--
-- We never read binary data, only metadata.
downloadedRead :: DownloadedFile down => down Metadata -> IO ByteString
-- | Copy a downloaded file to its destination
downloadedCopyTo :: DownloadedFile down => down a -> Path Absolute -> IO ()
-- | Helper function to implement repWithMirror.
mirrorsUnsupported :: Maybe [Mirror] -> IO a -> IO a
remoteRepoPath :: RepoLayout -> RemoteFile fs typ -> Formats fs RepoPath
remoteRepoPath' :: RepoLayout -> RemoteFile fs typ -> HasFormat fs f -> RepoPath
-- | Is a particular remote file cached?
data IsCached :: * -> *
[CacheAs] :: CachedFile -> IsCached Metadata
[DontCache] :: IsCached Binary
[CacheIndex] :: IsCached Binary
-- | Which remote files should we cache locally?
mustCache :: RemoteFile fs typ -> IsCached typ
instance GHC.Num.Num Hackage.Security.Client.Repository.AttemptNr
instance GHC.Classes.Ord Hackage.Security.Client.Repository.AttemptNr
instance GHC.Classes.Eq Hackage.Security.Client.Repository.AttemptNr
instance GHC.Show.Show Hackage.Security.Client.Repository.CachedFile
instance GHC.Classes.Ord Hackage.Security.Client.Repository.CachedFile
instance GHC.Classes.Eq Hackage.Security.Client.Repository.CachedFile
instance GHC.Show.Show (Hackage.Security.Client.Repository.RemoteFile fs typ)
instance GHC.Show.Show Hackage.Security.Client.Repository.SomeRemoteError
instance GHC.Classes.Eq (Hackage.Security.Client.Repository.IsCached typ)
instance GHC.Show.Show (Hackage.Security.Client.Repository.IsCached typ)
instance GHC.Show.Show (Hackage.Security.Client.Repository.Repository down)
instance GHC.Exception.Type.Exception Hackage.Security.Client.Repository.SomeRemoteError
instance Hackage.Security.Util.Pretty.Pretty Hackage.Security.Client.Repository.SomeRemoteError
instance Hackage.Security.Util.Pretty.Pretty Hackage.Security.Client.Repository.LogMessage
instance Hackage.Security.Util.Pretty.Pretty Hackage.Security.Client.Repository.UpdateFailure
instance Hackage.Security.Util.Pretty.Pretty Hackage.Security.Client.Repository.CachedFile
instance Hackage.Security.Util.Pretty.Pretty (Hackage.Security.Client.Repository.RemoteFile fs typ)
-- | Abstracting over HTTP libraries
module Hackage.Security.Client.Repository.HttpLib
-- | Abstraction over HTTP clients
--
-- This avoids insisting on a particular implementation (such as the HTTP
-- package) and allows for other implementations (such as a conduit based
-- one).
--
-- NOTE: Library-specific exceptions MUST be wrapped in
-- SomeRemoteError.
data HttpLib
HttpLib :: (forall a. Throws SomeRemoteError => [HttpRequestHeader] -> URI -> ([HttpResponseHeader] -> BodyReader -> IO a) -> IO a) -> (forall a. Throws SomeRemoteError => [HttpRequestHeader] -> URI -> (Int, Int) -> (HttpStatus -> [HttpResponseHeader] -> BodyReader -> IO a) -> IO a) -> HttpLib
-- | Download a file
[httpGet] :: HttpLib -> forall a. Throws SomeRemoteError => [HttpRequestHeader] -> URI -> ([HttpResponseHeader] -> BodyReader -> IO a) -> IO a
-- | Download a byte range
--
-- The range is given as the starting and (exclusive) end offset in bytes.
--
-- HTTP servers are normally expected to respond to a range request with
-- a "206 Partial Content" response. However, servers can respond with a
-- "200 OK" response, sending the entire file instead (for instance, this
-- may happen for servers that don't actually support range requests, but
-- for which we optimistically assumed they did). Implementations of
-- HttpLib may accept such a response and inform the
-- hackage-security library that the whole file is being
-- returned; the security library can then decide to execute the
-- BodyReader anyway (downloading the entire file) or abort the
-- request and try something else. For this reason the security library
-- must be informed whether the server returned the full file or the
-- requested range.
[httpGetRange] :: HttpLib -> forall a. Throws SomeRemoteError => [HttpRequestHeader] -> URI -> (Int, Int) -> (HttpStatus -> [HttpResponseHeader] -> BodyReader -> IO a) -> IO a
-- | Additional request headers
--
-- Since different libraries represent headers differently, here we just
-- abstract over the few request headers that we might want to set
data HttpRequestHeader
-- | Set Cache-Control: max-age=0
HttpRequestMaxAge0 :: HttpRequestHeader
-- | Set Cache-Control: no-transform
HttpRequestNoTransform :: HttpRequestHeader
-- | Response headers
--
-- Since different libraries represent headers differently, here we just
-- abstract over the few response headers that we might want to know
-- about.
data HttpResponseHeader
-- | Server accepts byte-range requests (Accept-Ranges: bytes)
HttpResponseAcceptRangesBytes :: HttpResponseHeader
-- | HTTP status code
data HttpStatus
-- | 200 OK
HttpStatus200OK :: HttpStatus
-- | 206 Partial Content
HttpStatus206PartialContent :: HttpStatus
-- | Proxy configuration
--
-- Although actually setting the proxy is the purview of the
-- initialization function for individual HttpLib implementations
-- and therefore outside the scope of this module, we offer this
-- ProxyConfiguration type here as a way to uniformly configure
-- proxies across all HttpLibs.
data ProxyConfig a
-- | Don't use a proxy
ProxyConfigNone :: ProxyConfig a
-- | Use this specific proxy
--
-- Individual HTTP backends use their own types for specifying proxies.
ProxyConfigUse :: a -> ProxyConfig a
-- | Use automatic proxy settings
--
-- What precisely automatic means is HttpLib specific, though
-- typically it will involve looking at the HTTP_PROXY
-- environment variable or the (Windows) registry.
ProxyConfigAuto :: ProxyConfig a
-- | An IO action that represents an incoming response body from the
-- server.
--
-- The action gets a single chunk of data from the response body, or an
-- empty bytestring if no more data is available.
--
-- This definition is copied from the http-client package.
type BodyReader = IO ByteString
-- | Construct a BodyReader from a lazy bytestring
--
-- This is appropriate if the lazy bytestring is constructed, say, by
-- calling hGetContents on a network socket, and the chunks of
-- the bytestring correspond to the chunks as they are returned from the
-- OS network layer.
--
-- If the lazy bytestring needs to be re-chunked this function is NOT
-- suitable.
bodyReaderFromBS :: ByteString -> IO BodyReader
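-- A minimal sketch (not part of this API) of how a consumer might drain a
-- BodyReader, relying only on the contract above: the action is called
-- repeatedly until it yields an empty chunk. The helper name is
-- hypothetical, and the chunks are assumed to be strict bytestrings (as in
-- the http-client package).
--
-- > import qualified Data.ByteString      as BS
-- > import qualified Data.ByteString.Lazy as BS.L
-- >
-- > -- Hypothetical helper: read chunks until the empty chunk signals EOF.
-- > drainBodyReader :: BodyReader -> IO BS.L.ByteString
-- > drainBodyReader br = go []
-- >   where
-- >     go acc = do
-- >       chunk <- br
-- >       if BS.null chunk
-- >         then return (BS.L.fromChunks (reverse acc))
-- >         else go (chunk : acc)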
instance GHC.Show.Show Hackage.Security.Client.Repository.HttpLib.HttpResponseHeader
instance GHC.Classes.Ord Hackage.Security.Client.Repository.HttpLib.HttpResponseHeader
instance GHC.Classes.Eq Hackage.Security.Client.Repository.HttpLib.HttpResponseHeader
instance GHC.Show.Show Hackage.Security.Client.Repository.HttpLib.HttpRequestHeader
instance GHC.Classes.Ord Hackage.Security.Client.Repository.HttpLib.HttpRequestHeader
instance GHC.Classes.Eq Hackage.Security.Client.Repository.HttpLib.HttpRequestHeader
-- | The files we cache from the repository
--
-- Both the Local and the Remote repositories make use of this module.
module Hackage.Security.Client.Repository.Cache
-- | Location and layout of the local cache
data Cache
Cache :: Path Absolute -> CacheLayout -> Cache
[cacheRoot] :: Cache -> Path Absolute
[cacheLayout] :: Cache -> CacheLayout
-- | Get a cached file (if available)
getCached :: Cache -> CachedFile -> IO (Maybe (Path Absolute))
-- | Get the cached root
--
-- Calling getCachedRoot without root info available is a
-- programmer error and will result in an unchecked exception. See
-- requiresBootstrap.
getCachedRoot :: Cache -> IO (Path Absolute)
-- | Get the cached index (if available)
getCachedIndex :: Cache -> Format f -> IO (Maybe (Path Absolute))
-- | Delete a previously downloaded remote file
clearCache :: Cache -> IO ()
withIndex :: Cache -> (Handle -> IO a) -> IO a
getIndexIdx :: Cache -> IO TarIndex
-- | Cache a previously downloaded remote file
cacheRemoteFile :: forall down typ f. DownloadedFile down => Cache -> down typ -> Format f -> IsCached typ -> IO ()
-- | Lock the cache
--
-- This avoids two concurrent processes updating the cache at the same
-- time, provided they both take the lock.
lockCache :: Cache -> IO () -> IO ()
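-- As an illustration, a hypothetical way to construct a Cache value,
-- assuming an absolute cache directory obtained elsewhere and using
-- cabalCacheLayout (listed further below). The helper name is illustrative
-- only.
--
-- > -- Hypothetical: build a Cache rooted at a given absolute directory,
-- > -- using the cabal-install cache layout.
-- > mkCache :: Path Absolute -> Cache
-- > mkCache dir = Cache { cacheRoot = dir, cacheLayout = cabalCacheLayout }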
-- | An implementation of Repository that talks to repositories over HTTP.
--
-- This implementation is itself parameterized over an
-- HttpLib, so that it is not tied to a specific library; for
-- instance, HttpLib can be implemented with the
-- HTTP library, the http-client library, or others.
--
-- It would also be possible to give _other_ Repository implementations
-- that talk to repositories over HTTP, making different design decisions
-- than we did here; in particular:
--
-- - We attempt to do incremental downloads of the index when possible.
-- - We reuse the Repository.Local to deal with the local cache.
-- - We download timestamp.json and snapshot.json together. This is
--   implemented here because:
--     - One level down (HttpLib) we have no access to the local cache.
--     - One level up (Repository API) would require _all_ Repositories
--       to implement this optimization.
--
module Hackage.Security.Client.Repository.Remote
-- | Initialize the repository (and cleanup resources afterwards)
--
-- We allow specifying multiple mirrors to initialize the repository.
-- These are mirrors that can be found "out of band" (out of the scope of
-- the TUF protocol), for example in a cabal.config file. The
-- TUF protocol itself will specify that any of these mirrors can serve a
-- mirrors.json file that itself contains mirrors; we consider
-- these as _additional_ mirrors to the ones that are passed here.
--
-- NOTE: The list of mirrors should be non-empty (and should typically
-- include the primary server).
--
-- TODO: In the future we could allow finer control over precisely which
-- mirrors we use (which combination of the mirrors that are passed as
-- arguments here and the mirrors that we get from mirrors.json)
-- as well as indicating mirror preferences.
withRepository :: HttpLib -> [URI] -> RepoOpts -> Cache -> RepoLayout -> IndexLayout -> (LogMessage -> IO ()) -> (Repository RemoteTemp -> IO a) -> IO a
-- | Repository options with a reasonable default
--
-- Clients should use defaultRepositoryOpts and override
-- required settings.
data RepoOpts
RepoOpts :: Bool -> RepoOpts
-- | Allow additional mirrors?
--
-- If this is set to True (default), in addition to the (out-of-band)
-- specified mirrors we will also use mirrors reported by those
-- out-of-band mirrors (that is, mirrors.json).
[repoAllowAdditionalMirrors] :: RepoOpts -> Bool
-- | Default repository options
defaultRepoOpts :: RepoOpts
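-- A minimal usage sketch, assuming an HttpLib provided by one of the HTTP
-- backend packages and an already-constructed Cache. The wrapper name and
-- mirror URL are illustrative only; logging uses the Pretty instance for
-- LogMessage.
--
-- > import Data.Maybe  (fromJust)
-- > import Network.URI (parseURI)
-- >
-- > -- Hypothetical wrapper around withRepository.
-- > withHackageRepo :: HttpLib -> Cache -> (Repository RemoteTemp -> IO a) -> IO a
-- > withHackageRepo httpLib cache callback =
-- >     withRepository httpLib mirrors defaultRepoOpts cache
-- >                    hackageRepoLayout hackageIndexLayout
-- >                    (putStrLn . pretty) callback
-- >   where
-- >     mirrors = [fromJust (parseURI "http://hackage.haskell.org/")]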
data RemoteTemp :: * -> *
data FileSize
-- | For most files we download we know the exact size beforehand (because
-- this information comes from the snapshot or delegated info)
FileSizeExact :: Int54 -> FileSize
-- | For some files we might not know the size beforehand, but we might be
-- able to provide an upper bound (timestamp, root info)
FileSizeBound :: Int54 -> FileSize
fileSizeWithinBounds :: Int54 -> FileSize -> Bool
instance GHC.Show.Show Hackage.Security.Client.Repository.Remote.FileSize
instance GHC.Show.Show Hackage.Security.Client.Repository.Remote.FileTooLarge
instance Hackage.Security.Util.Pretty.Pretty (Hackage.Security.Client.Repository.Remote.RemoteTemp typ)
instance Hackage.Security.Client.Repository.DownloadedFile Hackage.Security.Client.Repository.Remote.RemoteTemp
instance Hackage.Security.Util.Pretty.Pretty Hackage.Security.Client.Repository.Remote.FileTooLarge
instance GHC.Exception.Type.Exception Hackage.Security.Client.Repository.Remote.FileTooLarge
-- | Local repository
module Hackage.Security.Client.Repository.Local
-- | Location of the repository
--
-- Note that we regard the local repository as immutable; we cache files
-- just like we do for remote repositories.
type LocalRepo = Path Absolute
data LocalFile a
-- | Initialize the repository (and cleanup resources afterwards)
--
-- Like a remote repository, a local repository takes a RepoLayout as
-- argument; but where the remote repository interprets this RepoLayout
-- relative to a URL, the local repository interprets it relative to a
-- local directory.
--
-- It uses the same cache as the remote repository.
withRepository :: LocalRepo -> Cache -> RepoLayout -> IndexLayout -> (LogMessage -> IO ()) -> (Repository LocalFile -> IO a) -> IO a
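-- Analogously, a hypothetical sketch for a local repository, assuming the
-- repository lives in an absolute directory laid out like a legacy cabal
-- local repo:
--
-- > withLocalRepo :: Path Absolute -> Cache -> (Repository LocalFile -> IO a) -> IO a
-- > withLocalRepo repoDir cache callback =
-- >     withRepository repoDir cache cabalLocalRepoLayout hackageIndexLayout
-- >                    (putStrLn . pretty) callback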
instance Hackage.Security.Client.Repository.DownloadedFile Hackage.Security.Client.Repository.Local.LocalFile
-- | Main entry point into the Hackage Security framework for servers
module Hackage.Security.Server
class FromJSON m a
fromJSON :: FromJSON m a => JSValue -> m a
class ToJSON m a
toJSON :: ToJSON m a => a -> m JSValue
data WriteJSON a
data ReadJSON_NoKeys_NoLayout a
data ReadJSON_Keys_NoLayout a
data ReadJSON_Keys_Layout a
data DeserializationError
-- | Malformed JSON has syntax errors in the JSON itself (i.e., we cannot
-- even parse it to a JSValue)
DeserializationErrorMalformed :: String -> DeserializationError
-- | Invalid JSON has valid syntax but invalid structure
--
-- The string gives a hint about what we expected instead
DeserializationErrorSchema :: String -> DeserializationError
-- | The JSON file contains a key ID of an unknown key
DeserializationErrorUnknownKey :: KeyId -> DeserializationError
-- | Some verification step failed
DeserializationErrorValidation :: String -> DeserializationError
-- | Wrong file type
--
-- Records actual and expected types.
DeserializationErrorFileType :: String -> String -> DeserializationError
parseJSON_Keys_Layout :: FromJSON ReadJSON_Keys_Layout a => KeyEnv -> RepoLayout -> ByteString -> Either DeserializationError a
parseJSON_Keys_NoLayout :: FromJSON ReadJSON_Keys_NoLayout a => KeyEnv -> ByteString -> Either DeserializationError a
parseJSON_NoKeys_NoLayout :: FromJSON ReadJSON_NoKeys_NoLayout a => ByteString -> Either DeserializationError a
readJSON_Keys_Layout :: (FsRoot root, FromJSON ReadJSON_Keys_Layout a) => KeyEnv -> RepoLayout -> Path root -> IO (Either DeserializationError a)
readJSON_Keys_NoLayout :: (FsRoot root, FromJSON ReadJSON_Keys_NoLayout a) => KeyEnv -> Path root -> IO (Either DeserializationError a)
readJSON_NoKeys_NoLayout :: (FsRoot root, FromJSON ReadJSON_NoKeys_NoLayout a) => Path root -> IO (Either DeserializationError a)
-- | Render to canonical JSON format
renderJSON :: ToJSON WriteJSON a => RepoLayout -> a -> ByteString
-- | Variation on renderJSON for files that don't require the repo
-- layout
renderJSON_NoLayout :: ToJSON Identity a => a -> ByteString
writeJSON :: ToJSON WriteJSON a => RepoLayout -> Path Absolute -> a -> IO ()
writeJSON_NoLayout :: ToJSON Identity a => Path Absolute -> a -> IO ()
data Ed25519
data Key a
[KeyEd25519] :: PublicKey -> SecretKey -> Key Ed25519
data PublicKey a
[PublicKeyEd25519] :: PublicKey -> PublicKey Ed25519
data PrivateKey a
[PrivateKeyEd25519] :: SecretKey -> PrivateKey Ed25519
data KeyType typ
[KeyTypeEd25519] :: KeyType Ed25519
somePublicKey :: Some Key -> Some PublicKey
somePublicKeyType :: Some PublicKey -> Some KeyType
someKeyId :: HasKeyId key => Some key -> KeyId
publicKey :: Key a -> PublicKey a
privateKey :: Key a -> PrivateKey a
createKey :: KeyType key -> IO (Key key)
createKey' :: KeyType key -> IO (Some Key)
-- | The key ID of a key, by definition, is the hexdigest of the SHA-256
-- hash of the canonical JSON form of the key where the private object
-- key is excluded.
--
-- NOTE: The FromJSON and ToJSON instances for KeyId are intentionally
-- omitted. Use writeKeyAsId instead.
newtype KeyId
KeyId :: String -> KeyId
[keyIdString] :: KeyId -> String
-- | Compute the key ID of a key
class HasKeyId key
keyId :: HasKeyId key => key typ -> KeyId
-- | Sign a bytestring and return the signature
--
-- TODO: It is unfortunate that we have to convert to a strict bytestring
-- for ed25519
sign :: PrivateKey typ -> ByteString -> ByteString
verify :: PublicKey typ -> ByteString -> ByteString -> Bool
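-- To illustrate how these pieces fit together, a hypothetical
-- sign-and-verify round trip with a freshly generated Ed25519 key (the
-- ByteString is assumed to be the same bytestring type used by sign and
-- verify above):
--
-- > -- Hypothetical: sign a message with a fresh key and verify the result.
-- > signVerifyRoundTrip :: ByteString -> IO Bool
-- > signVerifyRoundTrip msg = do
-- >   key <- createKey KeyTypeEd25519
-- >   let sig = sign (privateKey key) msg
-- >   return (verify (publicKey key) msg sig)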
-- | File length
--
-- Having verified file length information means we can protect against
-- endless data attacks and similar.
newtype FileLength
FileLength :: Int54 -> FileLength
[fileLength] :: FileLength -> Int54
-- | File hash
newtype Hash
Hash :: String -> Hash
-- | Key threshold
--
-- The key threshold is the minimum number of keys a document must be
-- signed with. Key thresholds are specified in RoleSpec or
-- DelegationsSpec.
newtype KeyThreshold
KeyThreshold :: Int54 -> KeyThreshold
-- | File information
--
-- This intentionally does not have an Eq instance; see
-- knownFileInfoEqual and verifyFileInfo instead.
--
-- NOTE: Throughout we compute file information always over the raw
-- bytes. For example, when timestamp.json lists the hash of
-- snapshot.json, this hash is computed over the actual
-- snapshot.json file (as opposed to the canonical form of the
-- embedded JSON). This brings it in line with the hash computed over
-- target files, where that is the only choice available.
data FileInfo
FileInfo :: FileLength -> Map HashFn Hash -> FileInfo
[fileInfoLength] :: FileInfo -> FileLength
[fileInfoHashes] :: FileInfo -> Map HashFn Hash
data HashFn
HashFnSHA256 :: HashFn
HashFnMD5 :: HashFn
-- | File hash
newtype Hash
Hash :: String -> Hash
-- | Compute FileInfo
--
-- TODO: Currently this will load the entire input bytestring into
-- memory. We need to make this incremental, by computing the length and
-- all hashes in a single traversal over the input.
fileInfo :: ByteString -> FileInfo
-- | Compute FileInfo
computeFileInfo :: FsRoot root => Path root -> IO FileInfo
-- | Compare the expected trusted file info against the actual file info of
-- a target file.
--
-- This should be used only when the FileInfo is already known. If
-- we want to compare known FileInfo against a file on disk we
-- should delay until we have confirmed that the file lengths match (see
-- downloadedVerify).
compareTrustedFileInfo :: FileInfo -> FileInfo -> Bool
knownFileInfoEqual :: FileInfo -> FileInfo -> Bool
-- | Extract SHA256 hash from FileInfo (if present)
fileInfoSHA256 :: FileInfo -> Maybe Hash
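-- For example, one might check a downloaded blob against trusted file info
-- roughly as follows (the expected info is the first argument of
-- compareTrustedFileInfo; the helper name is hypothetical):
--
-- > -- Hypothetical: does this bytestring match the trusted file info?
-- > matchesTrustedInfo :: FileInfo -> ByteString -> Bool
-- > matchesTrustedInfo trustedInfo bs =
-- >     compareTrustedFileInfo trustedInfo (fileInfo bs)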
-- | 54-bit integer values
--
-- JavaScript can only safely represent numbers between -(2^53 -
-- 1) and 2^53 - 1.
--
-- TODO: Although we introduce the type here, we don't actually do any
-- bounds checking and just inherit all type class instances from Int64.
-- We should probably define fromInteger to do bounds checking,
-- give different instances for type classes such as Bounded and
-- FiniteBits, etc.
data Int54
data FileChange
-- | File got added or modified; we record the new file info
FileChanged :: FileInfo -> FileChange
-- | File got deleted
FileDeleted :: FileChange
-- | Entries in FileMap either talk about the repository or the
-- index
data TargetPath
TargetPathRepo :: RepoPath -> TargetPath
TargetPathIndex :: IndexPath -> TargetPath
-- | Mapping from paths to file info
--
-- File maps are used in target files; the paths are relative to the
-- location of the target files containing the file map.
data FileMap
fileMapChanges :: FileMap -> FileMap -> Map TargetPath FileChange
class HasHeader a
-- | File expiry date
fileExpires :: HasHeader a => Lens' a FileExpires
-- | File version (monotonically increasing counter)
fileVersion :: HasHeader a => Lens' a FileVersion
-- | File version
--
-- The file version is a flat integer which must monotonically increase
-- on every file update.
--
-- The Show and Read instances are defined in terms of the
-- underlying Int (this is used, for example, by Hackage during the
-- backup process).
newtype FileVersion
FileVersion :: Int54 -> FileVersion
-- | File expiry date
--
-- A Nothing value here means no expiry. That makes it possible to
-- set some files to never expire. (Note that not having the Maybe in the
-- type would still allow that, because you could set an expiry date 2000
-- years into the future. By having the Maybe here we avoid the _need_
-- for such an encoding trick.)
newtype FileExpires
FileExpires :: Maybe UTCTime -> FileExpires
-- | Occasionally it is useful to read only a header from a file.
--
-- HeaderOnly intentionally only has a FromJSON instance
-- (no ToJSON).
data Header
Header :: FileExpires -> FileVersion -> Header
[headerExpires] :: Header -> FileExpires
[headerVersion] :: Header -> FileVersion
expiresInDays :: UTCTime -> Integer -> FileExpires
expiresNever :: FileExpires
isExpired :: UTCTime -> FileExpires -> Bool
versionInitial :: FileVersion
versionIncrement :: FileVersion -> FileVersion
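-- A small illustration of the expiry helpers, assuming a current time
-- obtained elsewhere (e.g. via getCurrentTime); the helper name is
-- hypothetical.
--
-- > -- Hypothetical: a header expiring 10 days from 'now' is not yet
-- > -- expired at 'now', and 'expiresNever' never expires.
-- > expiryExamples :: UTCTime -> (Bool, Bool)
-- > expiryExamples now =
-- >     ( isExpired now (expiresInDays now 10)  -- False
-- >     , isExpired now expiresNever            -- False
-- >     )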
-- | Location of the various files we cache
--
-- Although the generic TUF algorithms do not care how we organize the
-- cache, we nonetheless specify this here because as long as there are
-- tools which access files in the cache directly we need to define the
-- cache layout. See also comments for defaultCacheLayout.
data CacheLayout
CacheLayout :: CachePath -> CachePath -> CachePath -> CachePath -> CachePath -> CachePath -> CachePath -> CacheLayout
-- | TUF root metadata
[cacheLayoutRoot] :: CacheLayout -> CachePath
-- | TUF timestamp
[cacheLayoutTimestamp] :: CacheLayout -> CachePath
-- | TUF snapshot
[cacheLayoutSnapshot] :: CacheLayout -> CachePath
-- | TUF mirrors list
[cacheLayoutMirrors] :: CacheLayout -> CachePath
-- | Uncompressed index tarball
[cacheLayoutIndexTar] :: CacheLayout -> CachePath
-- | Index to the uncompressed index tarball
[cacheLayoutIndexIdx] :: CacheLayout -> CachePath
-- | Compressed index tarball
--
-- We cache both the compressed and the uncompressed tarballs, because
-- incremental updates happen through the compressed tarball, but reads
-- happen through the uncompressed one (with the help of the tarball
-- index).
[cacheLayoutIndexTarGz] :: CacheLayout -> CachePath
-- | The cache layout cabal-install uses
--
-- We cache the index as cache/00-index.tar; this is
-- important because `cabal-install` expects to find it there (and does
-- not currently go through the hackage-security library to get files
-- from the index).
cabalCacheLayout :: CacheLayout
-- | Layout of the files within the index tarball
data IndexLayout
IndexLayout :: (forall dec. IndexFile dec -> IndexPath) -> (IndexPath -> Maybe (Some IndexFile)) -> IndexLayout
-- | Translate an IndexFile to a path
[indexFileToPath] :: IndexLayout -> forall dec. IndexFile dec -> IndexPath
-- | Parse an IndexPath back into an IndexFile (if recognised)
[indexFileFromPath] :: IndexLayout -> IndexPath -> Maybe (Some IndexFile)
-- | Files that we might request from the index
--
-- The type index tells us the type of the decoded file, if any. For
-- files for which the library does not support decoding this will be
-- (). NOTE: Clients should NOT rely on this type index being
-- (), or they might break if we add support for parsing
-- additional file formats in the future.
--
-- TODO: If we wanted to support legacy Hackage, we should also have a
-- case for the global preferred-versions file. But supporting legacy
-- Hackage will probably require more work anyway..
data IndexFile :: * -> *
[IndexPkgMetadata] :: PackageIdentifier -> IndexFile (Signed Targets)
[IndexPkgCabal] :: PackageIdentifier -> IndexFile ()
[IndexPkgPrefs] :: PackageName -> IndexFile ()
-- | The layout of the index as maintained on Hackage
hackageIndexLayout :: IndexLayout
indexLayoutPkgMetadata :: IndexLayout -> PackageIdentifier -> IndexPath
indexLayoutPkgCabal :: IndexLayout -> PackageIdentifier -> IndexPath
indexLayoutPkgPrefs :: IndexLayout -> PackageName -> IndexPath
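-- For instance, the path of a package's .cabal file within the Hackage
-- index can be computed through the layout's translation function or,
-- equivalently, through the convenience wrapper (the helper name is
-- hypothetical):
--
-- > -- Hypothetical: where does pkgId's .cabal file live inside the index?
-- > cabalPathInIndex :: PackageIdentifier -> IndexPath
-- > cabalPathInIndex pkgId =
-- >     indexFileToPath hackageIndexLayout (IndexPkgCabal pkgId)
-- >     -- equivalently: indexLayoutPkgCabal hackageIndexLayout pkgId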
-- | Layout of a repository
data RepoLayout
RepoLayout :: RepoPath -> RepoPath -> RepoPath -> RepoPath -> RepoPath -> RepoPath -> (PackageIdentifier -> RepoPath) -> RepoLayout
-- | TUF root metadata
[repoLayoutRoot] :: RepoLayout -> RepoPath
-- | TUF timestamp
[repoLayoutTimestamp] :: RepoLayout -> RepoPath
-- | TUF snapshot
[repoLayoutSnapshot] :: RepoLayout -> RepoPath
-- | TUF mirrors list
[repoLayoutMirrors] :: RepoLayout -> RepoPath
-- | Compressed index tarball
[repoLayoutIndexTarGz] :: RepoLayout -> RepoPath
-- | Uncompressed index tarball
[repoLayoutIndexTar] :: RepoLayout -> RepoPath
-- | Path to the package tarball
[repoLayoutPkgTarGz] :: RepoLayout -> PackageIdentifier -> RepoPath
-- | The layout used on Hackage
hackageRepoLayout :: RepoLayout
-- | Layout used by cabal for ("legacy") local repos
--
-- Obviously, such repos do not normally contain any of the TUF files, so
-- their location is more or less arbitrary here.
cabalLocalRepoLayout :: RepoLayout
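-- As an example, the repository-relative path of a package tarball under
-- the Hackage layout (helper name hypothetical):
--
-- > pkgTarballRepoPath :: PackageIdentifier -> RepoPath
-- > pkgTarballRepoPath pkgId = repoLayoutPkgTarGz hackageRepoLayout pkgId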
data Mirrors
Mirrors :: FileVersion -> FileExpires -> [Mirror] -> Mirrors
[mirrorsVersion] :: Mirrors -> FileVersion
[mirrorsExpires] :: Mirrors -> FileExpires
[mirrorsMirrors] :: Mirrors -> [Mirror]
-- | Definition of a mirror
--
-- NOTE: Unlike the TUF specification, we require that all mirrors must
-- have the same format. That is, we omit metapath and
-- targetspath.
data Mirror
Mirror :: URI -> MirrorContent -> Mirror
[mirrorUrlBase] :: Mirror -> URI
[mirrorContent] :: Mirror -> MirrorContent
-- | Full versus partial mirrors
--
-- The TUF spec explicitly allows for partial mirrors, with the mirrors
-- file specifying (through patterns) what is available from partial
-- mirrors.
--
-- For now we only support full mirrors; if we wanted to add partial
-- mirrors, we would add a second MirrorPartial constructor here
-- with arguments corresponding to TUF's metacontent and
-- targetscontent fields.
data MirrorContent
MirrorFull :: MirrorContent
type MirrorDescription = String
-- | Give a human-readable description of a particular mirror
--
-- (for use in error messages)
describeMirror :: Mirror -> MirrorDescription
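-- Constructing a (full) mirror from a base URI is straightforward; a
-- hypothetical sketch:
--
-- > mkFullMirror :: URI -> Mirror
-- > mkFullMirror uri = Mirror { mirrorUrlBase = uri, mirrorContent = MirrorFull }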
-- | The root of the repository
--
-- Repository roots can be anchored at a remote URL or a local directory.
--
-- Note that even for remote repos RepoRoot is (potentially)
-- different from Web -- for a repository located at, say,
-- http://hackage.haskell.org they happen to coincide,
-- but for one located at
-- http://example.com/some/subdirectory they do not.
data RepoRoot
-- | Paths relative to the root of the repository
type RepoPath = Path RepoRoot
anchorRepoPathLocally :: Path root -> RepoPath -> Path root
anchorRepoPathRemotely :: Path Web -> RepoPath -> Path Web
-- | The root of the index tarball
data IndexRoot
-- | Paths relative to the root of the index tarball
type IndexPath = Path IndexRoot
-- | The cache directory
data CacheRoot
type CachePath = Path CacheRoot
-- | Anchor a cache path to the location of the cache
anchorCachePath :: Path root -> CachePath -> Path root
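-- For example, a repo-relative path taken from a RepoLayout can be anchored
-- to a local directory (sketch; the helper name is hypothetical):
--
-- > -- Hypothetical: absolute on-disk location of root.json for a local repo.
-- > localRootJson :: Path Absolute -> Path Absolute
-- > localRootJson repoDir =
-- >     anchorRepoPathLocally repoDir (repoLayoutRoot hackageRepoLayout)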
-- | The root metadata
--
-- NOTE: We must have the invariant that ALL keys (apart from delegation
-- keys) must be listed in rootKeys. (Delegation keys satisfy a
-- similar invariant, see Targets.)
data Root
Root :: FileVersion -> FileExpires -> KeyEnv -> RootRoles -> Root
[rootVersion] :: Root -> FileVersion
[rootExpires] :: Root -> FileExpires
[rootKeys] :: Root -> KeyEnv
[rootRoles] :: Root -> RootRoles
data RootRoles
RootRoles :: RoleSpec Root -> RoleSpec Snapshot -> RoleSpec Targets -> RoleSpec Timestamp -> RoleSpec Mirrors -> RootRoles
[rootRolesRoot] :: RootRoles -> RoleSpec Root
[rootRolesSnapshot] :: RootRoles -> RoleSpec Snapshot
[rootRolesTargets] :: RootRoles -> RoleSpec Targets
[rootRolesTimestamp] :: RootRoles -> RoleSpec Timestamp
[rootRolesMirrors] :: RootRoles -> RoleSpec Mirrors
-- | Role specification
--
-- The phantom type indicates what kind of type this role is meant to
-- verify.
data RoleSpec a
RoleSpec :: [Some PublicKey] -> KeyThreshold -> RoleSpec a
[roleSpecKeys] :: RoleSpec a -> [Some PublicKey]
[roleSpecThreshold] :: RoleSpec a -> KeyThreshold
data Signed a
Signed :: a -> Signatures -> Signed a
[signed] :: Signed a -> a
[signatures] :: Signed a -> Signatures
-- | A list of signatures
--
-- Invariant: each signature must be made with a different key. We
-- enforce this invariant for incoming untrusted data
-- (fromPreSignatures) but not for lists of signatures that we
-- create in code.
newtype Signatures
Signatures :: [Signature] -> Signatures
data Signature
Signature :: ByteString -> Some PublicKey -> Signature
[signature] :: Signature -> ByteString
[signatureKey] :: Signature -> Some PublicKey
-- | Create a new document without any signatures
unsigned :: a -> Signed a
-- | Sign a document
withSignatures :: ToJSON WriteJSON a => RepoLayout -> [Some Key] -> a -> Signed a
-- | Variation on withSignatures that doesn't need the repo layout
withSignatures' :: ToJSON Identity a => [Some Key] -> a -> Signed a
-- | Construct signatures for already rendered value
signRendered :: [Some Key] -> ByteString -> Signatures
verifySignature :: ByteString -> Signature -> Bool
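-- A hypothetical round trip over already-rendered canonical JSON: sign the
-- bytes with a set of keys, then verify each resulting detached signature
-- against the same bytes.
--
-- > signAndCheck :: [Some Key] -> ByteString -> Bool
-- > signAndCheck keys rendered = all (verifySignature rendered) sigs
-- >   where
-- >     Signatures sigs = signRendered keys rendered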
-- | General FromJSON instance for signed datatypes
--
-- We don't give a general FromJSON instance for Signed because for some
-- datatypes we need to do something special (datatypes where we need to
-- read key environments); for instance, see the "Signed Root" instance.
signedFromJSON :: (MonadKeys m, FromJSON m a) => JSValue -> m (Signed a)
-- | Signature verification
--
-- NOTES:
--
-- 1. By definition, the signature must be verified against the canonical
--    JSON format. This means we _must_ parse and then pretty print (as we
--    do here) because the document as stored may or may not be in
--    canonical format.
--
-- 2. However, it is important that we NOT translate from the JSValue to
--    whatever internal datatype we are using and then back to JSValue,
--    because that may not roundtrip: we must allow for additional fields
--    in the JSValue that we ignore (and would therefore lose when we
--    attempt to roundtrip).
--
-- 3. We verify that all signatures are valid, but we cannot verify (here)
--    that these signatures are signed with the right key, or that we have
--    a sufficient number of signatures. This will be the responsibility
--    of the calling code.
verifySignatures :: JSValue -> Signatures -> Bool
-- | File with uninterpreted signatures
--
-- Sometimes we want to be able to read a file without interpreting the
-- signatures (that is, resolving the key IDs) or doing any kind of
-- checks on them. One advantage of this is that this allows us to read
-- many file types without any key environment at all, which is sometimes
-- useful.
data UninterpretedSignatures a
UninterpretedSignatures :: a -> [PreSignature] -> UninterpretedSignatures a
[uninterpretedSigned] :: UninterpretedSignatures a -> a
[uninterpretedSignatures] :: UninterpretedSignatures a -> [PreSignature]
-- | A signature with a key ID (rather than an actual key)
--
-- This corresponds precisely to the TUF representation of a signature.
data PreSignature
PreSignature :: ByteString -> Some KeyType -> KeyId -> PreSignature
[presignature] :: PreSignature -> ByteString
[presigMethod] :: PreSignature -> Some KeyType
[presigKeyId] :: PreSignature -> KeyId
-- | Convert a pre-signature to a signature
--
-- Verifies that the key type matches the advertised method.
fromPreSignature :: MonadKeys m => PreSignature -> m Signature
-- | Convert a list of PreSignatures to a list of Signatures
--
-- This verifies the invariant that all signatures are made with
-- different keys. We do this on the presignatures rather than the
-- signatures so that we can do the check on key IDs, rather than keys
-- (the latter don't have an Ord instance).
fromPreSignatures :: MonadKeys m => [PreSignature] -> m Signatures
-- | Convert signature to pre-signature
toPreSignature :: Signature -> PreSignature
-- | Convert list of pre-signatures to a list of signatures
toPreSignatures :: Signatures -> [PreSignature]
data Snapshot
Snapshot :: FileVersion -> FileExpires -> FileInfo -> FileInfo -> FileInfo -> Maybe FileInfo -> Snapshot
[snapshotVersion] :: Snapshot -> FileVersion
[snapshotExpires] :: Snapshot -> FileExpires
-- | File info for the root metadata
--
-- We list this explicitly in the snapshot so that we can check if we
-- need to update the root metadata without first having to download the
-- entire index tarball.
[snapshotInfoRoot] :: Snapshot -> FileInfo
-- | File info for the mirror metadata
[snapshotInfoMirrors] :: Snapshot -> FileInfo
-- | Compressed index tarball
[snapshotInfoTarGz] :: Snapshot -> FileInfo
-- | Uncompressed index tarball
--
-- Repositories are not required to provide this.
[snapshotInfoTar] :: Snapshot -> Maybe FileInfo
-- | Target metadata
--
-- Most target files do not need expiry dates because they are not
-- subject to change (and hence attacks like freeze attacks are not a
-- concern).
data Targets
Targets :: FileVersion -> FileExpires -> FileMap -> Maybe Delegations -> Targets
[targetsVersion] :: Targets -> FileVersion
[targetsExpires] :: Targets -> FileExpires
[targetsTargets] :: Targets -> FileMap
[targetsDelegations] :: Targets -> Maybe Delegations
-- | Delegations
--
-- Much like the Root datatype, this must have an invariant that ALL used
-- keys (apart from the global keys, which are in the root key
-- environment) must be listed in delegationsKeys.
data Delegations
Delegations :: KeyEnv -> [DelegationSpec] -> Delegations
[delegationsKeys] :: Delegations -> KeyEnv
[delegationsRoles] :: Delegations -> [DelegationSpec]
-- | Delegation specification
--
-- NOTE: This is a close analogue of RoleSpec.
data DelegationSpec
DelegationSpec :: [Some PublicKey] -> KeyThreshold -> Delegation -> DelegationSpec
[delegationSpecKeys] :: DelegationSpec -> [Some PublicKey]
[delegationSpecThreshold] :: DelegationSpec -> KeyThreshold
[delegation] :: DelegationSpec -> Delegation
-- | A delegation
--
-- A delegation is a pair of a pattern and a replacement.
--
-- See match for an example.
data Delegation
Delegation :: Pattern a -> Replacement a -> Delegation
targetsLookup :: TargetPath -> Targets -> Maybe FileInfo
data Timestamp
Timestamp :: FileVersion -> FileExpires -> FileInfo -> Timestamp
[timestampVersion] :: Timestamp -> FileVersion
[timestampExpires] :: Timestamp -> FileExpires
[timestampInfoSnapshot] :: Timestamp -> FileInfo
-- | Main entry point into the Hackage Security framework for clients
module Hackage.Security.Client
-- | Generic logic for checking if there are updates
--
-- This implements the logic described in Section 5.1, "The client
-- application", of the TUF spec. It checks which of the server metadata
-- has changed, and downloads all changed metadata to the local cache.
-- (Metadata here refers both to the TUF security metadata as well as the
-- Hackage package index.)
--
-- You should pass Nothing for the UTCTime _only_ under
-- exceptional circumstances (such as when the main server is down for
-- longer than the expiry dates used in the timestamp files on mirrors).
checkForUpdates :: (Throws VerificationError, Throws SomeRemoteError) => Repository down -> Maybe UTCTime -> IO HasUpdates
data HasUpdates
HasUpdates :: HasUpdates
NoUpdates :: HasUpdates
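-- A minimal sketch of invoking the update check, using the current time and
-- converting the checked exceptions with uncheckClientErrors (listed further
-- below). The helper name is hypothetical.
--
-- > import Data.Time (getCurrentTime)
-- >
-- > -- Hypothetical: check for updates against the current wall-clock time.
-- > checkNow :: Repository down -> IO HasUpdates
-- > checkNow rep = do
-- >   now <- getCurrentTime
-- >   uncheckClientErrors (checkForUpdates rep (Just now))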
-- | Download a package
downloadPackage :: (Throws SomeRemoteError, Throws VerificationError, Throws InvalidPackageException) => Repository down -> PackageIdentifier -> Path Absolute -> IO ()
-- | Variation on downloadPackage that takes a FilePath instead.
downloadPackage' :: (Throws SomeRemoteError, Throws VerificationError, Throws InvalidPackageException) => Repository down -> PackageIdentifier -> FilePath -> IO ()
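-- For example, downloading a package tarball to a plain FilePath
-- destination (a sketch; the destination is assumed to name the target
-- file, and the helper name is hypothetical):
--
-- > fetchPackage :: Repository down -> PackageIdentifier -> FilePath -> IO ()
-- > fetchPackage rep pkgId dest =
-- >     uncheckClientErrors (downloadPackage' rep pkgId dest)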
-- | Index directory
data Directory
Directory :: DirectoryEntry -> DirectoryEntry -> (forall dec. IndexFile dec -> Maybe DirectoryEntry) -> [(DirectoryEntry, IndexPath, Maybe (Some IndexFile))] -> Directory
-- | The first entry in the directory
[directoryFirst] :: Directory -> DirectoryEntry
-- | The next available (i.e., one after last) directory entry
[directoryNext] :: Directory -> DirectoryEntry
-- | Look up an entry in the directory
--
-- This is an efficient operation.
[directoryLookup] :: Directory -> forall dec. IndexFile dec -> Maybe DirectoryEntry
-- | An enumeration of all entries
--
-- This field is lazily constructed, so if you don't need it, it does not
-- incur a performance overhead. Moreover, the IndexFile is also
-- created lazily so if you only need the raw IndexPath there is
-- no parsing overhead.
--
-- The entries are ordered by DirectoryEntry so that the entries
-- can efficiently be read in sequence.
--
-- NOTE: This means that there are two ways to enumerate all entries in
-- the tar file, since looking up an entry using indexLookupEntry
-- also returns the DirectoryEntry of the next entry. However, walking
-- the index that way involves reading through the entire tar file. If
-- you only need to read some files, it is significantly more efficient
-- to enumerate the tar entries using directoryEntries instead and only
-- call indexLookupEntry when required.
[directoryEntries] :: Directory -> [(DirectoryEntry, IndexPath, Maybe (Some IndexFile))]
-- | Entry into the Hackage index.
newtype DirectoryEntry
DirectoryEntry :: TarEntryOffset -> DirectoryEntry
-- | (Low-level) block number of the tar index entry
--
-- Exposed for the benefit of clients who read the .tar file
-- directly. For this reason also the Show and Read
-- instances for DirectoryEntry just print and parse the
-- underlying TarEntryOffset.
[directoryEntryBlockNo] :: DirectoryEntry -> TarEntryOffset
-- | Read the Hackage index directory
--
-- Should only be called after checkForUpdates.
getDirectory :: Repository down -> IO Directory
-- | Files that we might request from the index
--
-- The type index tells us the type of the decoded file, if any. For
-- files for which the library does not support decoding this will be
-- (). NOTE: Clients should NOT rely on this type index being
-- (), or they might break if we add support for parsing
-- additional file formats in the future.
--
-- TODO: If we wanted to support legacy Hackage, we should also have a
-- case for the global preferred-versions file. But supporting legacy
-- Hackage will probably require more work anyway..
data IndexFile :: * -> *
[IndexPkgMetadata] :: PackageIdentifier -> IndexFile (Signed Targets)
[IndexPkgCabal] :: PackageIdentifier -> IndexFile ()
[IndexPkgPrefs] :: PackageName -> IndexFile ()
-- | Entry from the Hackage index; see withIndex.
data IndexEntry dec
IndexEntry :: IndexPath -> Maybe (IndexFile dec) -> ByteString -> Either SomeException dec -> EpochTime -> IndexEntry dec
-- | The raw path in the tarfile
[indexEntryPath] :: IndexEntry dec -> IndexPath
-- | The parsed file (if recognised)
[indexEntryPathParsed] :: IndexEntry dec -> Maybe (IndexFile dec)
-- | The raw contents
--
-- Although this is a lazy bytestring, this is actually read into memory
-- strictly (i.e., it can safely be used outside the scope of withIndex
-- and friends).
[indexEntryContent] :: IndexEntry dec -> ByteString
-- | The parsed contents
--
-- This field is lazily constructed; the parser is not run unless you do
-- a pattern match on this value.
[indexEntryContentParsed] :: IndexEntry dec -> Either SomeException dec
-- | The time of the entry in the tarfile.
[indexEntryTime] :: IndexEntry dec -> EpochTime
-- | Various operations that we can perform on the index once it is open
--
-- Note that IndexEntry contains fields for both the raw file
-- contents and the parsed file contents; clients can choose which to
-- use.
--
-- In principle these callbacks will do verification (once we have
-- implemented author signing). Right now they don't need to do that,
-- because the index as a whole will have been verified.
data IndexCallbacks
IndexCallbacks :: (DirectoryEntry -> IO (Some IndexEntry, Maybe DirectoryEntry)) -> (forall dec. IndexFile dec -> IO (Maybe (IndexEntry dec))) -> (forall dec. DirectoryEntry -> IndexFile dec -> IO (IndexEntry dec)) -> (Throws InvalidPackageException => PackageIdentifier -> IO (Trusted ByteString)) -> (Throws InvalidPackageException => PackageIdentifier -> IO (Trusted Targets)) -> ((Throws InvalidPackageException, Throws VerificationError) => PackageIdentifier -> IO (Trusted FileInfo)) -> ((Throws InvalidPackageException, Throws VerificationError) => PackageIdentifier -> IO (Trusted Hash)) -> Directory -> IndexCallbacks
-- | Look up an entry by DirectoryEntry
--
-- Since these DirectoryEntry values must come from somewhere (probably
-- from the Directory), it is assumed that they are valid; if they
-- are not, an (unchecked) exception will be thrown.
--
-- This function also returns the DirectoryEntry of the
-- next file in the index (if any) for the benefit of clients who
-- wish to walk through the entire index.
[indexLookupEntry] :: IndexCallbacks -> DirectoryEntry -> IO (Some IndexEntry, Maybe DirectoryEntry)
-- | Look up an entry by IndexFile
--
-- Returns Nothing if the IndexFile does not refer to an
-- existing file.
[indexLookupFile] :: IndexCallbacks -> forall dec. IndexFile dec -> IO (Maybe (IndexEntry dec))
-- | Variation if both the DirectoryEntry and the IndexFile
-- are known
--
-- You might use this when scanning the index using
-- directoryEntries.
[indexLookupFileEntry] :: IndexCallbacks -> forall dec. DirectoryEntry -> IndexFile dec -> IO (IndexEntry dec)
-- | Get (raw) cabal file (wrapper around indexLookupFile)
[indexLookupCabal] :: IndexCallbacks -> Throws InvalidPackageException => PackageIdentifier -> IO (Trusted ByteString)
-- | Lookup package metadata (wrapper around indexLookupFile)
--
-- This will throw an (unchecked) exception if the targets.json
-- file could not be parsed.
[indexLookupMetadata] :: IndexCallbacks -> Throws InvalidPackageException => PackageIdentifier -> IO (Trusted Targets)
-- | Get file info (including hash) (wrapper around indexLookupFile)
[indexLookupFileInfo] :: IndexCallbacks -> (Throws InvalidPackageException, Throws VerificationError) => PackageIdentifier -> IO (Trusted FileInfo)
-- | Get the SHA256 hash for a package (wrapper around
-- indexLookupFileInfo)
--
-- In addition to the exceptions thrown by indexLookupFileInfo, this
-- will also throw an exception if the SHA256 is not listed in the
-- FileMap (again, this will not happen with a well-formed Hackage
-- index).
[indexLookupHash] :: IndexCallbacks -> (Throws InvalidPackageException, Throws VerificationError) => PackageIdentifier -> IO (Trusted Hash)
-- | The Directory for the index
--
-- We provide this here because withIndex will have read this
-- anyway.
[indexDirectory] :: IndexCallbacks -> Directory
-- | Look up entries in the Hackage index
--
-- This is in withFile style so that clients can efficiently look
-- up multiple files from the index.
--
-- Should only be called after checkForUpdates.
withIndex :: Repository down -> (IndexCallbacks -> IO a) -> IO a
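-- A hypothetical use of the index callbacks: look up the raw .cabal file of
-- a package inside withIndex, converting the checked exception via
-- uncheckClientErrors. The helper name is illustrative only.
--
-- > getCabalFile :: Repository down -> PackageIdentifier -> IO (Trusted ByteString)
-- > getCabalFile rep pkgId =
-- >     withIndex rep $ \callbacks ->
-- >       uncheckClientErrors (indexLookupCabal callbacks pkgId)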
-- | Check if we need to bootstrap (i.e., if we have root info)
requiresBootstrap :: Repository down -> IO Bool
-- | Bootstrap the chain of trust
--
-- New clients might need to obtain a copy of the root metadata. This
-- however represents a chicken-and-egg problem: how can we verify the
-- root metadata we downloaded? The only possibility is to be provided,
-- out of band, with a set of root keys and an appropriate
-- threshold.
--
-- Clients who provide a threshold of 0 can do an initial "unsafe" update
-- of the root information, if they wish.
--
-- The downloaded root information will _only_ be verified against the
-- provided keys, and _not_ against previously downloaded root info (if
-- any). It is the responsibility of the client to call bootstrap
-- only when this is the desired behaviour.
bootstrap :: (Throws SomeRemoteError, Throws VerificationError) => Repository down -> [KeyId] -> KeyThreshold -> IO ()
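-- Putting requiresBootstrap and bootstrap together, a hypothetical helper
-- that bootstraps only when needed, given out-of-band root key IDs and a
-- threshold:
--
-- > import Control.Monad (when)
-- >
-- > bootstrapIfNeeded :: Repository down -> [KeyId] -> KeyThreshold -> IO ()
-- > bootstrapIfNeeded rep rootKeyIds threshold = do
-- >     needed <- requiresBootstrap rep
-- >     when needed $
-- >       uncheckClientErrors (bootstrap rep rootKeyIds threshold)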
-- | File length
--
-- Having verified file length information means we can protect against
-- endless data attacks and similar.
newtype FileLength
FileLength :: Int54 -> FileLength
[fileLength] :: FileLength -> Int54
-- | File hash
newtype Hash
Hash :: String -> Hash
-- | Key threshold
--
-- The key threshold is the minimum number of keys a document must be
-- signed with. Key thresholds are specified in RoleSpec or
-- DelegationsSpec.
newtype KeyThreshold
KeyThreshold :: Int54 -> KeyThreshold
-- | File information
--
-- This intentionally does not have an Eq instance; see
-- knownFileInfoEqual and verifyFileInfo instead.
--
-- NOTE: Throughout we compute file information always over the raw
-- bytes. For example, when timestamp.json lists the hash of
-- snapshot.json, this hash is computed over the actual
-- snapshot.json file (as opposed to the canonical form of the
-- embedded JSON). This brings it in line with the hash computed over
-- target files, where that is the only choice available.
data FileInfo
FileInfo :: FileLength -> Map HashFn Hash -> FileInfo
[fileInfoLength] :: FileInfo -> FileLength
[fileInfoHashes] :: FileInfo -> Map HashFn Hash
data HashFn
HashFnSHA256 :: HashFn
HashFnMD5 :: HashFn
-- | File hash
newtype Hash
Hash :: String -> Hash
-- | Compute FileInfo
--
-- TODO: Currently this will load the entire input bytestring into
-- memory. We need to make this incremental, by computing the length and
-- all hashes in a single traversal over the input.
fileInfo :: ByteString -> FileInfo
-- | Compute FileInfo
computeFileInfo :: FsRoot root => Path root -> IO FileInfo
-- | Compare the expected trusted file info against the actual file info of
-- a target file.
--
-- This should be used only when the FileInfo is already known. If
-- we want to compare known FileInfo against a file on disk we
-- should delay until we have confirmed that the file lengths match (see
-- downloadedVerify).
compareTrustedFileInfo :: FileInfo -> FileInfo -> Bool
knownFileInfoEqual :: FileInfo -> FileInfo -> Bool
-- | Extract SHA256 hash from FileInfo (if present)
fileInfoSHA256 :: FileInfo -> Maybe Hash
-- | 54-bit integer values
--
-- JavaScript can only safely represent numbers between -(2^53 -
-- 1) and 2^53 - 1.
--
-- TODO: Although we introduce the type here, we don't actually do any
-- bounds checking and just inherit all type class instances from Int64.
-- We should probably define fromInteger to do bounds checking,
-- give different instances for type classes such as Bounded and
-- FiniteBits, etc.
data Int54
data FileChange
-- | File got added or modified; we record the new file info
FileChanged :: FileInfo -> FileChange
-- | File got deleted
FileDeleted :: FileChange
-- | Entries in FileMap either talk about the repository or the
-- index
data TargetPath
TargetPathRepo :: RepoPath -> TargetPath
TargetPathIndex :: IndexPath -> TargetPath
-- | Mapping from paths to file info
--
-- File maps are used in target files; the paths are relative to the
-- location of the target files containing the file map.
data FileMap
fileMapChanges :: FileMap -> FileMap -> Map TargetPath FileChange
class HasHeader a
-- | File expiry date
fileExpires :: HasHeader a => Lens' a FileExpires
-- | File version (monotonically increasing counter)
fileVersion :: HasHeader a => Lens' a FileVersion
-- | File version
--
-- The file version is a flat integer which must monotonically increase
-- on every file update.
--
-- The Show and Read instances are defined in terms of the
-- underlying Int (this is used, for example, by Hackage during the
-- backup process).
newtype FileVersion
FileVersion :: Int54 -> FileVersion
-- | File expiry date
--
-- A Nothing value here means no expiry. That makes it possible to
-- set some files to never expire. (Note that not having the Maybe in the
-- type would still allow that, because you could set an expiry date 2000
-- years into the future. By having the Maybe here we avoid the _need_
-- for such an encoding trick.)
newtype FileExpires
FileExpires :: Maybe UTCTime -> FileExpires
-- | Occasionally it is useful to read only a header from a file.
--
-- HeaderOnly intentionally only has a FromJSON instance
-- (no ToJSON).
data Header
Header :: FileExpires -> FileVersion -> Header
[headerExpires] :: Header -> FileExpires
[headerVersion] :: Header -> FileVersion
expiresInDays :: UTCTime -> Integer -> FileExpires
expiresNever :: FileExpires
isExpired :: UTCTime -> FileExpires -> Bool
versionInitial :: FileVersion
versionIncrement :: FileVersion -> FileVersion
-- | Location of the various files we cache
--
-- Although the generic TUF algorithms do not care how we organize the
-- cache, we nonetheless specify this here because as long as there are
-- tools which access files in the cache directly we need to define the
-- cache layout. See also comments for defaultCacheLayout.
data CacheLayout
CacheLayout :: CachePath -> CachePath -> CachePath -> CachePath -> CachePath -> CachePath -> CachePath -> CacheLayout
-- | TUF root metadata
[cacheLayoutRoot] :: CacheLayout -> CachePath
-- | TUF timestamp
[cacheLayoutTimestamp] :: CacheLayout -> CachePath
-- | TUF snapshot
[cacheLayoutSnapshot] :: CacheLayout -> CachePath
-- | TUF mirrors list
[cacheLayoutMirrors] :: CacheLayout -> CachePath
-- | Uncompressed index tarball
[cacheLayoutIndexTar] :: CacheLayout -> CachePath
-- | Index to the uncompressed index tarball
[cacheLayoutIndexIdx] :: CacheLayout -> CachePath
-- | Compressed index tarball
--
-- We cache both the compressed and the uncompressed tarballs, because
-- incremental updates happen through the compressed tarball, but reads
-- happen through the uncompressed one (with the help of the tarball
-- index).
[cacheLayoutIndexTarGz] :: CacheLayout -> CachePath
-- | The cache layout cabal-install uses
--
-- We cache the index as cache/00-index.tar; this is
-- important because `cabal-install` expects to find it there (and does
-- not currently go through the hackage-security library to get files
-- from the index).
cabalCacheLayout :: CacheLayout
-- | Layout of the files within the index tarball
data IndexLayout
IndexLayout :: (forall dec. IndexFile dec -> IndexPath) -> (IndexPath -> Maybe (Some IndexFile)) -> IndexLayout
-- | Translate an IndexFile to a path
[indexFileToPath] :: IndexLayout -> forall dec. IndexFile dec -> IndexPath
-- | Parse an IndexPath back into an IndexFile (if recognised)
[indexFileFromPath] :: IndexLayout -> IndexPath -> Maybe (Some IndexFile)
-- | Files that we might request from the index
--
-- The type index tells us the type of the decoded file, if any. For
-- files for which the library does not support decoding this will be
-- (). NOTE: Clients should NOT rely on this type index being
-- (), or they might break if we add support for parsing
-- additional file formats in the future.
--
-- TODO: If we wanted to support legacy Hackage, we should also have a
-- case for the global preferred-versions file. But supporting legacy
-- Hackage will probably require more work anyway..
data IndexFile :: * -> *
[IndexPkgMetadata] :: PackageIdentifier -> IndexFile (Signed Targets)
[IndexPkgCabal] :: PackageIdentifier -> IndexFile ()
[IndexPkgPrefs] :: PackageName -> IndexFile ()
-- | The layout of the index as maintained on Hackage
hackageIndexLayout :: IndexLayout
indexLayoutPkgMetadata :: IndexLayout -> PackageIdentifier -> IndexPath
indexLayoutPkgCabal :: IndexLayout -> PackageIdentifier -> IndexPath
indexLayoutPkgPrefs :: IndexLayout -> PackageName -> IndexPath
-- | Layout of a repository
data RepoLayout
RepoLayout :: RepoPath -> RepoPath -> RepoPath -> RepoPath -> RepoPath -> RepoPath -> (PackageIdentifier -> RepoPath) -> RepoLayout
-- | TUF root metadata
[repoLayoutRoot] :: RepoLayout -> RepoPath
-- | TUF timestamp
[repoLayoutTimestamp] :: RepoLayout -> RepoPath
-- | TUF snapshot
[repoLayoutSnapshot] :: RepoLayout -> RepoPath
-- | TUF mirrors list
[repoLayoutMirrors] :: RepoLayout -> RepoPath
-- | Compressed index tarball
[repoLayoutIndexTarGz] :: RepoLayout -> RepoPath
-- | Uncompressed index tarball
[repoLayoutIndexTar] :: RepoLayout -> RepoPath
-- | Path to the package tarball
[repoLayoutPkgTarGz] :: RepoLayout -> PackageIdentifier -> RepoPath
-- | The layout used on Hackage
hackageRepoLayout :: RepoLayout
-- | Layout used by cabal for ("legacy") local repos
--
-- Obviously, such repos do not normally contain any of the TUF files, so
-- their location is more or less arbitrary here.
cabalLocalRepoLayout :: RepoLayout
data Mirrors
Mirrors :: FileVersion -> FileExpires -> [Mirror] -> Mirrors
[mirrorsVersion] :: Mirrors -> FileVersion
[mirrorsExpires] :: Mirrors -> FileExpires
[mirrorsMirrors] :: Mirrors -> [Mirror]
-- | Definition of a mirror
--
-- NOTE: Unlike the TUF specification, we require that all mirrors must
-- have the same format. That is, we omit metapath and
-- targetspath.
data Mirror
Mirror :: URI -> MirrorContent -> Mirror
[mirrorUrlBase] :: Mirror -> URI
[mirrorContent] :: Mirror -> MirrorContent
-- | Full versus partial mirrors
--
-- The TUF spec explicitly allows for partial mirrors, with the mirrors
-- file specifying (through patterns) what is available from partial
-- mirrors.
--
-- For now we only support full mirrors; if we wanted to add partial
-- mirrors, we would add a second MirrorPartial constructor here
-- with arguments corresponding to TUF's metacontent and
-- targetscontent fields.
data MirrorContent
MirrorFull :: MirrorContent
type MirrorDescription = String
-- | Give a human-readable description of a particular mirror
--
-- (for use in error messages)
describeMirror :: Mirror -> MirrorDescription
-- | The root of the repository
--
-- Repository roots can be anchored at a remote URL or a local directory.
--
-- Note that even for remote repos RepoRoot is (potentially)
-- different from Web -- for a repository located at, say,
-- http://hackage.haskell.org they happen to coincide,
-- but for one located at
-- http://example.com/some/subdirectory they do not.
data RepoRoot
-- | Paths relative to the root of the repository
type RepoPath = Path RepoRoot
anchorRepoPathLocally :: Path root -> RepoPath -> Path root
anchorRepoPathRemotely :: Path Web -> RepoPath -> Path Web
-- | The root of the index tarball
data IndexRoot
-- | Paths relative to the root of the index tarball
type IndexPath = Path IndexRoot
-- | The cache directory
data CacheRoot
type CachePath = Path CacheRoot
-- | Anchor a cache path to the location of the cache
anchorCachePath :: Path root -> CachePath -> Path root
-- | The root metadata
--
-- NOTE: We must have the invariant that ALL keys (apart from delegation
-- keys) must be listed in rootKeys. (Delegation keys satisfy a
-- similar invariant, see Targets.)
data Root
Root :: FileVersion -> FileExpires -> KeyEnv -> RootRoles -> Root
[rootVersion] :: Root -> FileVersion
[rootExpires] :: Root -> FileExpires
[rootKeys] :: Root -> KeyEnv
[rootRoles] :: Root -> RootRoles
data RootRoles
RootRoles :: RoleSpec Root -> RoleSpec Snapshot -> RoleSpec Targets -> RoleSpec Timestamp -> RoleSpec Mirrors -> RootRoles
[rootRolesRoot] :: RootRoles -> RoleSpec Root
[rootRolesSnapshot] :: RootRoles -> RoleSpec Snapshot
[rootRolesTargets] :: RootRoles -> RoleSpec Targets
[rootRolesTimestamp] :: RootRoles -> RoleSpec Timestamp
[rootRolesMirrors] :: RootRoles -> RoleSpec Mirrors
-- | Role specification
--
-- The phantom type indicates what kind of type this role is meant to
-- verify.
data RoleSpec a
RoleSpec :: [Some PublicKey] -> KeyThreshold -> RoleSpec a
[roleSpecKeys] :: RoleSpec a -> [Some PublicKey]
[roleSpecThreshold] :: RoleSpec a -> KeyThreshold
data Signed a
Signed :: a -> Signatures -> Signed a
[signed] :: Signed a -> a
[signatures] :: Signed a -> Signatures
-- | A list of signatures
--
-- Invariant: each signature must be made with a different key. We
-- enforce this invariant for incoming untrusted data
-- (fromPreSignatures) but not for lists of signatures that we
-- create in code.
newtype Signatures
Signatures :: [Signature] -> Signatures
data Signature
Signature :: ByteString -> Some PublicKey -> Signature
[signature] :: Signature -> ByteString
[signatureKey] :: Signature -> Some PublicKey
-- | Create a new document without any signatures
unsigned :: a -> Signed a
-- | Sign a document
withSignatures :: ToJSON WriteJSON a => RepoLayout -> [Some Key] -> a -> Signed a
-- | Variation on withSignatures that doesn't need the repo layout
withSignatures' :: ToJSON Identity a => [Some Key] -> a -> Signed a
-- | Construct signatures for already rendered value
signRendered :: [Some Key] -> ByteString -> Signatures
verifySignature :: ByteString -> Signature -> Bool
-- | General FromJSON instance for signed datatypes
--
-- We don't give a general FromJSON instance for Signed because for some
-- datatypes we need to do something special (datatypes where we need to
-- read key environments); for instance, see the "Signed Root" instance.
signedFromJSON :: (MonadKeys m, FromJSON m a) => JSValue -> m (Signed a)
-- | Signature verification
--
-- NOTES:
--
-- 1. By definition, the signature must be verified against the canonical
--    JSON format. This means we _must_ parse and then pretty print (as we
--    do here) because the document as stored may or may not be in
--    canonical format.
--
-- 2. However, it is important that we NOT translate from the JSValue to
--    whatever internal datatype we are using and then back to JSValue,
--    because that may not roundtrip: we must allow for additional fields
--    in the JSValue that we ignore (and would therefore lose when we
--    attempt to roundtrip).
--
-- 3. We verify that all signatures are valid, but we cannot verify (here)
--    that these signatures are signed with the right key, or that we have
--    a sufficient number of signatures. This will be the responsibility
--    of the calling code.
verifySignatures :: JSValue -> Signatures -> Bool
-- | File with uninterpreted signatures
--
-- Sometimes we want to be able to read a file without interpreting the
-- signatures (that is, resolving the key IDs) or doing any kind of
-- checks on them. One advantage of this is that this allows us to read
-- many file types without any key environment at all, which is sometimes
-- useful.
data UninterpretedSignatures a
UninterpretedSignatures :: a -> [PreSignature] -> UninterpretedSignatures a
[uninterpretedSigned] :: UninterpretedSignatures a -> a
[uninterpretedSignatures] :: UninterpretedSignatures a -> [PreSignature]
-- | A signature with a key ID (rather than an actual key)
--
-- This corresponds precisely to the TUF representation of a signature.
data PreSignature
PreSignature :: ByteString -> Some KeyType -> KeyId -> PreSignature
[presignature] :: PreSignature -> ByteString
[presigMethod] :: PreSignature -> Some KeyType
[presigKeyId] :: PreSignature -> KeyId
-- | Convert a pre-signature to a signature
--
-- Verifies that the key type matches the advertised method.
fromPreSignature :: MonadKeys m => PreSignature -> m Signature
-- | Convert a list of PreSignatures to a list of Signatures
--
-- This verifies the invariant that all signatures are made with
-- different keys. We do this on the presignatures rather than the
-- signatures so that we can do the check on key IDs, rather than keys
-- (the latter don't have an Ord instance).
fromPreSignatures :: MonadKeys m => [PreSignature] -> m Signatures
-- | Convert signature to pre-signature
toPreSignature :: Signature -> PreSignature
-- | Convert list of pre-signatures to a list of signatures
toPreSignatures :: Signatures -> [PreSignature]
data Snapshot
Snapshot :: FileVersion -> FileExpires -> FileInfo -> FileInfo -> FileInfo -> Maybe FileInfo -> Snapshot
[snapshotVersion] :: Snapshot -> FileVersion
[snapshotExpires] :: Snapshot -> FileExpires
-- | File info for the root metadata
--
-- We list this explicitly in the snapshot so that we can check if we
-- need to update the root metadata without first having to download the
-- entire index tarball.
[snapshotInfoRoot] :: Snapshot -> FileInfo
-- | File info for the mirror metadata
[snapshotInfoMirrors] :: Snapshot -> FileInfo
-- | Compressed index tarball
[snapshotInfoTarGz] :: Snapshot -> FileInfo
-- | Uncompressed index tarball
--
-- Repositories are not required to provide this.
[snapshotInfoTar] :: Snapshot -> Maybe FileInfo
-- | Target metadata
--
-- Most target files do not need expiry dates because they are not
-- subject to change (and hence attacks like freeze attacks are not a
-- concern).
data Targets
Targets :: FileVersion -> FileExpires -> FileMap -> Maybe Delegations -> Targets
[targetsVersion] :: Targets -> FileVersion
[targetsExpires] :: Targets -> FileExpires
[targetsTargets] :: Targets -> FileMap
[targetsDelegations] :: Targets -> Maybe Delegations
-- | Delegations
--
-- Much like the Root datatype, this must have an invariant that ALL used
-- keys (apart from the global keys, which are in the root key
-- environment) must be listed in delegationsKeys.
data Delegations
Delegations :: KeyEnv -> [DelegationSpec] -> Delegations
[delegationsKeys] :: Delegations -> KeyEnv
[delegationsRoles] :: Delegations -> [DelegationSpec]
-- | Delegation specification
--
-- NOTE: This is a close analogue of RoleSpec.
data DelegationSpec
DelegationSpec :: [Some PublicKey] -> KeyThreshold -> Delegation -> DelegationSpec
[delegationSpecKeys] :: DelegationSpec -> [Some PublicKey]
[delegationSpecThreshold] :: DelegationSpec -> KeyThreshold
[delegation] :: DelegationSpec -> Delegation
-- | A delegation
--
-- A delegation is a pair of a pattern and a replacement.
--
-- See match for an example.
data Delegation
Delegation :: Pattern a -> Replacement a -> Delegation
targetsLookup :: TargetPath -> Targets -> Maybe FileInfo
data Timestamp
Timestamp :: FileVersion -> FileExpires -> FileInfo -> Timestamp
[timestampVersion] :: Timestamp -> FileVersion
[timestampExpires] :: Timestamp -> FileExpires
[timestampInfoSnapshot] :: Timestamp -> FileInfo
data Ed25519
data Key a
[KeyEd25519] :: PublicKey -> SecretKey -> Key Ed25519
data PublicKey a
[PublicKeyEd25519] :: PublicKey -> PublicKey Ed25519
data PrivateKey a
[PrivateKeyEd25519] :: SecretKey -> PrivateKey Ed25519
data KeyType typ
[KeyTypeEd25519] :: KeyType Ed25519
somePublicKey :: Some Key -> Some PublicKey
somePublicKeyType :: Some PublicKey -> Some KeyType
someKeyId :: HasKeyId key => Some key -> KeyId
publicKey :: Key a -> PublicKey a
privateKey :: Key a -> PrivateKey a
createKey :: KeyType key -> IO (Key key)
createKey' :: KeyType key -> IO (Some Key)
-- | The key ID of a key, by definition, is the hexdigest of the SHA-256
-- hash of the canonical JSON form of the key where the private object
-- key is excluded.
--
-- NOTE: The FromJSON and ToJSON instances for KeyId are intentionally
-- omitted. Use writeKeyAsId instead.
newtype KeyId
KeyId :: String -> KeyId
[keyIdString] :: KeyId -> String
-- | Compute the key ID of a key
class HasKeyId key
keyId :: HasKeyId key => key typ -> KeyId
-- | Sign a bytestring and return the signature
--
-- TODO: It is unfortunate that we have to convert to a strict bytestring
-- for ed25519
sign :: PrivateKey typ -> ByteString -> ByteString
verify :: PublicKey typ -> ByteString -> ByteString -> Bool
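-- A minimal sketch (the demo action is hypothetical; the message is assumed
-- to be a lazy ByteString, as suggested by the note on sign, and the key ID
-- computation assumes the HasKeyId instance for PublicKey) tying the key
-- operations together: create an Ed25519 key pair, print its key ID, and
-- round-trip a signature.
--
-- > import qualified Data.ByteString.Lazy.Char8 as BS.L
-- >
-- > demo :: IO ()
-- > demo = do
-- >   key <- createKey KeyTypeEd25519
-- >   let pub = publicKey key
-- >       msg = BS.L.pack "hello"
-- >       sig = sign (privateKey key) msg
-- >   putStrLn (keyIdString (keyId pub))
-- >   print (verify pub msg sig)  -- should print True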
trusted :: Trusted a -> a
-- | Repository
--
-- This is an abstract representation of a repository. It simply provides
-- a way to download metadata files and target files, without specifying
-- how this is done. For instance, for a local repository this could just
-- be a file read, whereas for remote repositories this could be using any
-- kind of HTTP client.
data Repository down
class DownloadedFile (down :: * -> *)
-- | Verify a downloaded file
downloadedVerify :: DownloadedFile down => down a -> Trusted FileInfo -> IO Bool
-- | Read the file we just downloaded into memory
--
-- We never read binary data, only metadata.
downloadedRead :: DownloadedFile down => down Metadata -> IO ByteString
-- | Copy a downloaded file to its destination
downloadedCopyTo :: DownloadedFile down => down a -> Path Absolute -> IO ()
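-- A minimal sketch (readIfVerified is a hypothetical helper) combining the
-- class methods above: read a downloaded metadata file into memory only
-- after its file info has been verified.
--
-- > readIfVerified :: DownloadedFile down
-- >                => down Metadata -> Trusted FileInfo -> IO (Maybe ByteString)
-- > readIfVerified file info = do
-- >   ok <- downloadedVerify file info
-- >   if ok then Just <$> downloadedRead file
-- >         else return Nothing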
-- | Repository-specific exceptions
--
-- For instance, for repositories using HTTP this might correspond to a
-- 404; for local repositories this might correspond to file-not-found,
-- etc.
data SomeRemoteError :: *
[SomeRemoteError] :: Exception e => e -> SomeRemoteError
-- | Log messages
--
-- We use a RemoteFile rather than a RepoPath here because we might not
-- have a RepoPath for the file we were trying to download (for example, if
-- the server does not provide an uncompressed tarball, it does not make
-- much sense to list the path to that non-existent tarball).
data LogMessage
-- | Root information was updated
--
-- This message is issued when the root information is updated as part of
-- the normal check-for-updates procedure. If the root information is
-- updated because of a verification error, LogVerificationError is issued
-- instead.
LogRootUpdated :: LogMessage
-- | A verification error
--
-- Verification errors can be temporary, and may be resolved later; hence
-- these are just warnings. (Verification errors that cannot be resolved
-- are thrown as exceptions.)
LogVerificationError :: VerificationError -> LogMessage
-- | Downloading a file from a repository
LogDownloading :: RemoteFile fs typ -> LogMessage
-- | Incrementally updating a file from a repository
LogUpdating :: RemoteFile fs Binary -> LogMessage
-- | Selected a particular mirror
LogSelectedMirror :: MirrorDescription -> LogMessage
-- | Updating a file failed (we will instead download it whole)
LogCannotUpdate :: RemoteFile fs Binary -> UpdateFailure -> LogMessage
-- | We got an exception with a particular mirror (we will try with a
-- different mirror if any are available)
LogMirrorFailed :: MirrorDescription -> SomeException -> LogMessage
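-- A minimal sketch (logToConsole is a hypothetical handler) of acting on a
-- few of the log messages above; real clients would typically route these
-- to their own logging facilities.
--
-- > logToConsole :: LogMessage -> IO ()
-- > logToConsole LogRootUpdated     = putStrLn "root metadata updated"
-- > logToConsole (LogDownloading _) = putStrLn "downloading a file"
-- > logToConsole (LogUpdating _)    = putStrLn "incrementally updating a file"
-- > logToConsole _                  = return ()  -- ignore the rest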
-- | Re-throw all exceptions thrown by the client API as unchecked
-- exceptions
uncheckClientErrors :: ((Throws VerificationError, Throws SomeRemoteError, Throws InvalidPackageException) => IO a) -> IO a
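-- A minimal sketch (reportErrors is a hypothetical wrapper) showing that,
-- once unchecked, the errors can be caught with ordinary Control.Exception
-- machinery:
--
-- > {-# LANGUAGE RankNTypes #-}
-- >
-- > import qualified Control.Exception as E
-- >
-- > reportErrors :: ((Throws VerificationError, Throws SomeRemoteError, Throws InvalidPackageException) => IO a)
-- >              -> IO (Either E.SomeException a)
-- > reportErrors act = E.try (uncheckClientErrors act)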
-- | Errors thrown during role validation
data VerificationError
-- | Not enough signatures signed with the appropriate keys
VerificationErrorSignatures :: TargetPath -> VerificationError
-- | The file is expired
VerificationErrorExpired :: TargetPath -> VerificationError
-- | The file version is less than the previous version
VerificationErrorVersion :: TargetPath -> VerificationError
-- | File information mismatch
VerificationErrorFileInfo :: TargetPath -> VerificationError
-- | We tried to look up file information about a particular target file,
-- but the information wasn't in the corresponding targets.json
-- file.
VerificationErrorUnknownTarget :: TargetPath -> VerificationError
-- | The metadata for the specified target is missing a SHA256
VerificationErrorMissingSHA256 :: TargetPath -> VerificationError
-- | Some verification errors materialize as deserialization errors
--
-- For example: if we try to deserialize a timestamp file but the
-- timestamp key has been rolled over, deserialization of the file will
-- fail with DeserializationErrorUnknownKey.
VerificationErrorDeserialization :: TargetPath -> DeserializationError -> VerificationError
-- | The spec stipulates that if a verification error occurs during the
-- check for updates, we must download new root information and start
-- over. However, we limit how often we attempt this.
--
-- We record all verification errors that occurred before we gave up.
VerificationErrorLoop :: VerificationHistory -> VerificationError
type VerificationHistory = [Either RootUpdated VerificationError]
-- | Root metadata updated (as part of the normal update process)
data RootUpdated
RootUpdated :: RootUpdated
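-- A minimal sketch (summariseHistory is a hypothetical helper) of
-- summarising a VerificationHistory by counting root updates versus
-- verification errors:
--
-- > summariseHistory :: VerificationHistory -> (Int, Int)
-- > summariseHistory history =
-- >   ( length [ () | Left  RootUpdated <- history ]
-- >   , length [ () | Right _           <- history ] )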
data InvalidPackageException
InvalidPackageException :: PackageIdentifier -> InvalidPackageException
data InvalidFileInIndex
InvalidFileInIndex :: IndexFile dec -> ByteString -> DeserializationError -> InvalidFileInIndex
[invalidFileInIndex] :: InvalidFileInIndex -> IndexFile dec
[invalidFileInIndexRaw] :: InvalidFileInIndex -> ByteString
[invalidFileInIndexError] :: InvalidFileInIndex -> DeserializationError
data LocalFileCorrupted
LocalFileCorrupted :: DeserializationError -> LocalFileCorrupted
instance GHC.Classes.Ord Hackage.Security.Client.DirectoryEntry
instance GHC.Classes.Eq Hackage.Security.Client.DirectoryEntry
instance GHC.Classes.Ord Hackage.Security.Client.HasUpdates
instance GHC.Classes.Eq Hackage.Security.Client.HasUpdates
instance GHC.Show.Show Hackage.Security.Client.HasUpdates
instance GHC.Show.Show Hackage.Security.Client.InvalidPackageException
instance GHC.Show.Show Hackage.Security.Client.LocalFileCorrupted
instance GHC.Show.Show Hackage.Security.Client.InvalidFileInIndex
instance GHC.Exception.Type.Exception Hackage.Security.Client.InvalidFileInIndex
instance Hackage.Security.Util.Pretty.Pretty Hackage.Security.Client.InvalidFileInIndex
instance GHC.Exception.Type.Exception Hackage.Security.Client.LocalFileCorrupted
instance Hackage.Security.Util.Pretty.Pretty Hackage.Security.Client.LocalFileCorrupted
instance GHC.Exception.Type.Exception Hackage.Security.Client.InvalidPackageException
instance Hackage.Security.Util.Pretty.Pretty Hackage.Security.Client.InvalidPackageException
instance GHC.Show.Show Hackage.Security.Client.DirectoryEntry
instance GHC.Read.Read Hackage.Security.Client.DirectoryEntry