- cacheHash :: ByteString -> String
- okayHash :: String -> Bool
- takeHash :: ByteString -> Maybe (String, ByteString)
- newtype Cache = Ca [CacheLoc]
- data CacheType
- data CacheLoc = Cache !CacheType !WritableOrNot !String
- data WritableOrNot
- = Writable
- | NotWritable
- data HashedDir
- hashedDir :: HashedDir -> String
- unionCaches :: Cache -> Cache -> Cache
- unionRemoteCaches :: Cache -> Cache -> String -> IO Cache
- cleanCaches :: Cache -> HashedDir -> IO ()
- cleanCachesWithHint :: Cache -> HashedDir -> [String] -> IO ()
- fetchFileUsingCache :: Cache -> HashedDir -> String -> IO (String, ByteString)
- speculateFileUsingCache :: Cache -> HashedDir -> String -> IO ()
- speculateFilesUsingCache :: Cache -> HashedDir -> [String] -> IO ()
- writeFileUsingCache :: Cache -> Compression -> HashedDir -> ByteString -> IO String
- peekInCache :: Cache -> HashedDir -> String -> IO Bool
- repo2cache :: String -> Cache
- writable :: CacheLoc -> Bool
- isthisrepo :: CacheLoc -> Bool
- hashedFilePath :: CacheLoc -> HashedDir -> String -> String
- allHashedDirs :: [HashedDir]
- compareByLocality :: CacheLoc -> CacheLoc -> Ordering
Documentation
cacheHash :: ByteString -> String
cacheHash computes the cache hash (i.e. the filename) of a packed string.
takeHash :: ByteString -> Maybe (String, ByteString)
unionCaches :: Cache -> Cache -> Cache
unionRemoteCaches :: Cache -> Cache -> String -> IO Cache
unionRemoteCaches merges caches. It tries to do better than just blindly copying remote cache entries:
- If the remote repository is accessed over the network, do not copy any cache entries from it. Taking its local entries does not make sense, and using its network entries can cause darcs to hang when it tries to reach an inaccessible host.
- If the remote repository is local, copy all of its network cache entries. For its local cache entries: if the cache directory exists and is writable, it is added as a writable cache; if it exists but is not writable, it is added as a read-only cache.
This approach should save us from bogus cache entries. One case where it does not work very well is when you fetch from a partial repository over the network. Hopefully this is not a common case.
fetchFileUsingCache :: Cache -> HashedDir -> String -> IO (String, ByteString)
speculateFileUsingCache :: Cache -> HashedDir -> String -> IO ()
speculateFileUsingCache cache subdirectory name takes note that the file name is likely to be useful soon: pipelined downloads will add it to the (low-priority) queue; for the rest it is a noop.
speculateFilesUsingCache :: Cache -> HashedDir -> [String] -> IO ()
Note that the files are likely to be useful soon: pipelined downloads will add them to the (low-priority) queue, for the rest it is a noop.
writeFileUsingCache :: Cache -> Compression -> HashedDir -> ByteString -> IO String
writeFileUsingCache cache compression subdir contents writes the string contents to the directory subdir, except if it is already in the cache, in which case it is a noop. Warning: this means that in case of a hash collision, writing with writeFileUsingCache is a noop. The returned value is the filename that was given to the string.
peekInCache :: Cache -> HashedDir -> String -> IO Bool
peekInCache cache subdir hash tells whether cache contains an object with hash hash in subdir, in a writable position.
Florent: why do we want it to be in a writable position?
repo2cache :: String -> Cache
isthisrepo :: CacheLoc -> Bool
hashedFilePath :: CacheLoc -> HashedDir -> String -> String
hashedFilePath cachelocation subdir hash returns the physical filename of hash hash in the subdir section of cachelocation.
compareByLocality :: CacheLoc -> CacheLoc -> Ordering
Compares two cache locations; a remote cache is greater than a local one. The order of the comparison is given by: local < http < ssh.