-- Hoogle documentation, generated by Haddock -- See Hoogle, http://www.haskell.org/hoogle/ -- | Robust, reliable performance measurement and analysis -- -- This library provides a powerful but simple way to measure software -- performance. It provides both a framework for executing and analysing -- benchmarks and a set of driver functions that makes it easy to build -- and run benchmarks, and to analyse their results. -- -- The fastest way to get started is to read the online tutorial, -- followed by the documentation and examples in the -- Criterion.Main module. -- -- For examples of the kinds of reports that criterion generates, see -- the home page. @package criterion @version 1.2.2.0 -- | Types for benchmarking. -- -- The core type is Benchmarkable, which admits both pure -- functions and IO actions. -- -- For a pure function of type a -> b, the benchmarking -- harness calls this function repeatedly, each time with a different -- Int64 argument (the number of times to run the function in a -- loop), and reduces the result the function returns to weak head normal -- form. -- -- For an action of type IO a, the benchmarking harness calls -- the action repeatedly, but does not reduce the result. module Criterion.Types -- | Top-level benchmarking configuration. data Config Config :: CL Double -> Bool -> Double -> Int -> [([String], String)] -> Maybe FilePath -> Maybe FilePath -> Maybe FilePath -> Maybe FilePath -> Maybe FilePath -> Verbosity -> FilePath -> Config -- | Confidence interval for bootstrap estimation (greater than 0, less -- than 1). [confInterval] :: Config -> CL Double -- | Obsolete, unused. This option used to force garbage collection -- between every benchmark run, but it no longer has an effect (we now -- unconditionally force garbage collection). This option remains solely -- for backwards API compatibility. [forceGC] :: Config -> Bool -- | Number of seconds to run a single benchmark. 
(In practice, execution -- time will very slightly exceed this limit.) [timeLimit] :: Config -> Double -- | Number of resamples to perform when bootstrapping. [resamples] :: Config -> Int -- | Regressions to perform. [regressions] :: Config -> [([String], String)] -- | File to write binary measurement and analysis data to. If not -- specified, this will be a temporary file. [rawDataFile] :: Config -> Maybe FilePath -- | File to write report output to, with template expanded. [reportFile] :: Config -> Maybe FilePath -- | File to write CSV summary to. [csvFile] :: Config -> Maybe FilePath -- | File to write JSON-formatted results to. [jsonFile] :: Config -> Maybe FilePath -- | File to write JUnit-compatible XML results to. [junitFile] :: Config -> Maybe FilePath -- | Verbosity level to use when running and analysing benchmarks. [verbosity] :: Config -> Verbosity -- | Template file to use if writing a report. [template] :: Config -> FilePath -- | Control the amount of information displayed. data Verbosity Quiet :: Verbosity Normal :: Verbosity Verbose :: Verbosity -- | A pure function or impure action that can be benchmarked. The -- Int64 parameter indicates the number of times to run the given -- function or action. data Benchmarkable Benchmarkable :: (Int64 -> IO a) -> (Int64 -> a -> IO ()) -> (a -> Int64 -> IO ()) -> Bool -> Benchmarkable [allocEnv] :: Benchmarkable -> Int64 -> IO a [cleanEnv] :: Benchmarkable -> Int64 -> a -> IO () [runRepeatedly] :: Benchmarkable -> a -> Int64 -> IO () [perRun] :: Benchmarkable -> Bool -- | Specification of a collection of benchmarks and environments. A -- benchmark may consist of: -- --
-- setupEnv = do -- let small = replicate 1000 (1 :: Int) -- big <- map length . words <$> readFile "/usr/dict/words" -- return (small, big) -- -- main = defaultMain [ -- -- notice the lazy pattern match here! -- env setupEnv $ \ ~(small,big) -> bgroup "main" [ -- bgroup "small" [ -- bench "length" $ whnf length small -- , bench "length . filter" $ whnf (length . filter (==1)) small -- ] -- , bgroup "big" [ -- bench "length" $ whnf length big -- , bench "length . filter" $ whnf (length . filter (==1)) big -- ] -- ] ] ---- -- Discussion. The environment created in the example above is -- intentionally not ideal. As Haskell's scoping rules suggest, -- the variable big is in scope for the benchmarks that use only -- small. It would be better to create a separate environment -- for big, so that it will not be kept alive while the -- unrelated benchmarks are being run. env :: NFData env => IO env -> (env -> Benchmark) -> Benchmark -- | Same as env, but allows for an additional callback to clean -- up the environment. Resource clean up is exception safe, that is, it -- runs even if the Benchmark throws an exception. envWithCleanup :: NFData env => IO env -> (env -> IO a) -> (env -> Benchmark) -> Benchmark -- | Create a Benchmarkable where a fresh environment is allocated for -- every batch of runs of the benchmarkable. -- -- The environment is evaluated to normal form before the benchmark is -- run. -- -- When using whnf, whnfIO, etc. Criterion creates a -- Benchmarkable which runs a batch of N repeat runs of -- that expression. Criterion may run any number of these batches to get -- accurate measurements. Environments created by env and -- envWithCleanup are shared across all these batches of runs. 
-- -- This is fine for simple benchmarks on static input, but when -- benchmarking IO operations where these operations can modify (and -- especially grow) the environment this means that later batches might -- have their accuracy affected due to, for example, longer -- garbage collection pauses. -- -- An example: Suppose we want to benchmark writing to a Chan, if we -- allocate the Chan using environment and our benchmark consists of -- writeChan env (), the contents and thus size of the Chan will -- grow with every repeat. If Criterion runs 1,000 batches of 1,000 -- repeats, the result is that the channel will have 999,000 items in it -- by the time the last batch is run. Since GHC GC has to copy the live -- set for every major GC this means our last set of writes will suffer a -- lot of noise from the previous repeats. -- -- By allocating a fresh environment for every batch of runs this -- function should eliminate this effect. perBatchEnv :: (NFData env, NFData b) => (Int64 -> IO env) -> (env -> IO b) -> Benchmarkable -- | Same as perBatchEnv, but allows for an additional callback -- to clean up the environment. Resource clean up is exception safe, that -- is, it runs even if the Benchmark throws an exception. perBatchEnvWithCleanup :: (NFData env, NFData b) => (Int64 -> IO env) -> (Int64 -> env -> IO ()) -> (env -> IO b) -> Benchmarkable -- | Create a Benchmarkable where a fresh environment is allocated for -- every run of the operation to benchmark. This is useful for -- benchmarking mutable operations that need a fresh environment, such as -- sorting a mutable Vector. -- -- As with env and perBatchEnv the environment is evaluated -- to normal form before the benchmark is run. -- -- This introduces extra noise and results in reduced accuracy compared to -- other Criterion benchmarks. But allows easier benchmarking for mutable -- operations than was previously possible. 
perRunEnv :: (NFData env, NFData b) => IO env -> (env -> IO b) -> Benchmarkable -- | Same as perRunEnv, but allows for an additional callback to -- clean up the environment. Resource clean up is exception safe, that -- is, it runs even if the Benchmark throws an exception. perRunEnvWithCleanup :: (NFData env, NFData b) => IO env -> (env -> IO ()) -> (env -> IO b) -> Benchmarkable -- | Construct a Benchmarkable value from an impure action, where -- the Int64 parameter indicates the number of times to run the -- action. toBenchmarkable :: (Int64 -> IO ()) -> Benchmarkable -- | Create a single benchmark. bench :: String -> Benchmarkable -> Benchmark -- | Group several benchmarks together under a common name. bgroup :: String -> [Benchmark] -> Benchmark -- | Add the given prefix to a name. If the prefix is empty, the name is -- returned unmodified. Otherwise, the prefix and name are separated by a -- '/' character. addPrefix :: String -> String -> String -- | Retrieve the names of all benchmarks. Grouped benchmarks are prefixed -- with the name of the group they're in. benchNames :: Benchmark -> [String] -- | Apply an argument to a function, and evaluate the result to weak head -- normal form (WHNF). whnf :: (a -> b) -> a -> Benchmarkable -- | Apply an argument to a function, and evaluate the result to normal -- form (NF). nf :: NFData b => (a -> b) -> a -> Benchmarkable -- | Perform an action, then evaluate its result to normal form. This is -- particularly useful for forcing a lazy IO action to be -- completely performed. nfIO :: NFData a => IO a -> Benchmarkable -- | Perform an action, then evaluate its result to weak head normal form -- (WHNF). This is useful for forcing an IO action whose result is -- an expression to be evaluated down to a more useful value. whnfIO :: IO a -> Benchmarkable -- | Outliers from sample data, calculated using the boxplot technique. 
data Outliers Outliers :: !Int64 -> !Int64 -> !Int64 -> !Int64 -> !Int64 -> Outliers [samplesSeen] :: Outliers -> !Int64 -- | More than 3 times the interquartile range (IQR) below the first -- quartile. [lowSevere] :: Outliers -> !Int64 -- | Between 1.5 and 3 times the IQR below the first quartile. [lowMild] :: Outliers -> !Int64 -- | Between 1.5 and 3 times the IQR above the third quartile. [highMild] :: Outliers -> !Int64 -- | More than 3 times the IQR above the third quartile. [highSevere] :: Outliers -> !Int64 -- | A description of the extent to which outliers in the sample data -- affect the sample mean and standard deviation. data OutlierEffect -- | Less than 1% effect. Unaffected :: OutlierEffect -- | Between 1% and 10%. Slight :: OutlierEffect -- | Between 10% and 50%. Moderate :: OutlierEffect -- | Above 50% (i.e. measurements are useless). Severe :: OutlierEffect -- | Analysis of the extent to which outliers in a sample affect its -- standard deviation (and to some extent, its mean). data OutlierVariance OutlierVariance :: OutlierEffect -> String -> Double -> OutlierVariance -- | Qualitative description of effect. [ovEffect] :: OutlierVariance -> OutlierEffect -- | Brief textual description of effect. [ovDesc] :: OutlierVariance -> String -- | Quantitative description of effect (a fraction between 0 and 1). [ovFraction] :: OutlierVariance -> Double -- | Results of a linear regression. data Regression Regression :: String -> Map String (Estimate ConfInt Double) -> Estimate ConfInt Double -> Regression -- | Name of the responding variable. [regResponder] :: Regression -> String -- | Map from name to value of predictor coefficients. [regCoeffs] :: Regression -> Map String (Estimate ConfInt Double) -- | R² goodness-of-fit estimate. [regRSquare] :: Regression -> Estimate ConfInt Double -- | Data for a KDE chart of performance. 
data KDE KDE :: String -> Vector Double -> Vector Double -> KDE [kdeType] :: KDE -> String [kdeValues] :: KDE -> Vector Double [kdePDF] :: KDE -> Vector Double -- | Report of a sample analysis. data Report Report :: Int -> String -> [String] -> Vector Measured -> SampleAnalysis -> Outliers -> [KDE] -> Report -- | A simple index indicating that this is the nth report. [reportNumber] :: Report -> Int -- | The name of this report. [reportName] :: Report -> String -- | See measureKeys. [reportKeys] :: Report -> [String] -- | Raw measurements. These are not corrected for the estimated -- measurement overhead that can be found via the anOverhead field -- of reportAnalysis. [reportMeasured] :: Report -> Vector Measured -- | Report analysis. [reportAnalysis] :: Report -> SampleAnalysis -- | Analysis of outliers. [reportOutliers] :: Report -> Outliers -- | Data for a KDE of times. [reportKDEs] :: Report -> [KDE] -- | Result of a bootstrap analysis of a non-parametric sample. data SampleAnalysis SampleAnalysis :: [Regression] -> Double -> Estimate ConfInt Double -> Estimate ConfInt Double -> OutlierVariance -> SampleAnalysis -- | Estimates calculated via linear regression. [anRegress] :: SampleAnalysis -> [Regression] -- | Estimated measurement overhead, in seconds. Estimation is performed -- via linear regression. [anOverhead] :: SampleAnalysis -> Double -- | Estimated mean. [anMean] :: SampleAnalysis -> Estimate ConfInt Double -- | Estimated standard deviation. [anStdDev] :: SampleAnalysis -> Estimate ConfInt Double -- | Description of the effects of outliers on the estimated variance. 
[anOutlierVar] :: SampleAnalysis -> OutlierVariance data DataRecord Measurement :: Int -> String -> (Vector Measured) -> DataRecord Analysed :: Report -> DataRecord instance GHC.Generics.Generic Criterion.Types.DataRecord instance GHC.Show.Show Criterion.Types.DataRecord instance GHC.Read.Read Criterion.Types.DataRecord instance GHC.Classes.Eq Criterion.Types.DataRecord instance GHC.Generics.Generic Criterion.Types.Report instance GHC.Show.Show Criterion.Types.Report instance GHC.Read.Read Criterion.Types.Report instance GHC.Classes.Eq Criterion.Types.Report instance GHC.Generics.Generic Criterion.Types.KDE instance Data.Data.Data Criterion.Types.KDE instance GHC.Show.Show Criterion.Types.KDE instance GHC.Read.Read Criterion.Types.KDE instance GHC.Classes.Eq Criterion.Types.KDE instance GHC.Generics.Generic Criterion.Types.SampleAnalysis instance GHC.Show.Show Criterion.Types.SampleAnalysis instance GHC.Read.Read Criterion.Types.SampleAnalysis instance GHC.Classes.Eq Criterion.Types.SampleAnalysis instance GHC.Generics.Generic Criterion.Types.Regression instance GHC.Show.Show Criterion.Types.Regression instance GHC.Read.Read Criterion.Types.Regression instance GHC.Classes.Eq Criterion.Types.Regression instance GHC.Generics.Generic Criterion.Types.OutlierVariance instance Data.Data.Data Criterion.Types.OutlierVariance instance GHC.Show.Show Criterion.Types.OutlierVariance instance GHC.Read.Read Criterion.Types.OutlierVariance instance GHC.Classes.Eq Criterion.Types.OutlierVariance instance GHC.Generics.Generic Criterion.Types.OutlierEffect instance Data.Data.Data Criterion.Types.OutlierEffect instance GHC.Show.Show Criterion.Types.OutlierEffect instance GHC.Read.Read Criterion.Types.OutlierEffect instance GHC.Classes.Ord Criterion.Types.OutlierEffect instance GHC.Classes.Eq Criterion.Types.OutlierEffect instance GHC.Generics.Generic Criterion.Types.Outliers instance Data.Data.Data Criterion.Types.Outliers instance GHC.Show.Show Criterion.Types.Outliers instance 
GHC.Read.Read Criterion.Types.Outliers instance GHC.Classes.Eq Criterion.Types.Outliers instance GHC.Generics.Generic Criterion.Types.Measured instance Data.Data.Data Criterion.Types.Measured instance GHC.Show.Show Criterion.Types.Measured instance GHC.Read.Read Criterion.Types.Measured instance GHC.Classes.Eq Criterion.Types.Measured instance GHC.Generics.Generic Criterion.Types.Config instance Data.Data.Data Criterion.Types.Config instance GHC.Show.Show Criterion.Types.Config instance GHC.Read.Read Criterion.Types.Config instance GHC.Classes.Eq Criterion.Types.Config instance GHC.Generics.Generic Criterion.Types.Verbosity instance Data.Data.Data Criterion.Types.Verbosity instance GHC.Show.Show Criterion.Types.Verbosity instance GHC.Read.Read Criterion.Types.Verbosity instance GHC.Enum.Enum Criterion.Types.Verbosity instance GHC.Enum.Bounded Criterion.Types.Verbosity instance GHC.Classes.Ord Criterion.Types.Verbosity instance GHC.Classes.Eq Criterion.Types.Verbosity instance Data.Aeson.Types.FromJSON.FromJSON Criterion.Types.Measured instance Data.Aeson.Types.ToJSON.ToJSON Criterion.Types.Measured instance Control.DeepSeq.NFData Criterion.Types.Measured instance Data.Binary.Class.Binary Criterion.Types.Measured instance GHC.Show.Show Criterion.Types.Benchmark instance Data.Aeson.Types.FromJSON.FromJSON Criterion.Types.Outliers instance Data.Aeson.Types.ToJSON.ToJSON Criterion.Types.Outliers instance Data.Binary.Class.Binary Criterion.Types.Outliers instance Control.DeepSeq.NFData Criterion.Types.Outliers instance Data.Aeson.Types.FromJSON.FromJSON Criterion.Types.OutlierEffect instance Data.Aeson.Types.ToJSON.ToJSON Criterion.Types.OutlierEffect instance Data.Binary.Class.Binary Criterion.Types.OutlierEffect instance Control.DeepSeq.NFData Criterion.Types.OutlierEffect instance GHC.Base.Monoid Criterion.Types.Outliers instance Data.Aeson.Types.FromJSON.FromJSON Criterion.Types.OutlierVariance instance Data.Aeson.Types.ToJSON.ToJSON Criterion.Types.OutlierVariance 
instance Data.Binary.Class.Binary Criterion.Types.OutlierVariance instance Control.DeepSeq.NFData Criterion.Types.OutlierVariance instance Data.Aeson.Types.FromJSON.FromJSON Criterion.Types.Regression instance Data.Aeson.Types.ToJSON.ToJSON Criterion.Types.Regression instance Data.Binary.Class.Binary Criterion.Types.Regression instance Control.DeepSeq.NFData Criterion.Types.Regression instance Data.Aeson.Types.FromJSON.FromJSON Criterion.Types.SampleAnalysis instance Data.Aeson.Types.ToJSON.ToJSON Criterion.Types.SampleAnalysis instance Data.Binary.Class.Binary Criterion.Types.SampleAnalysis instance Control.DeepSeq.NFData Criterion.Types.SampleAnalysis instance Data.Aeson.Types.FromJSON.FromJSON Criterion.Types.KDE instance Data.Aeson.Types.ToJSON.ToJSON Criterion.Types.KDE instance Data.Binary.Class.Binary Criterion.Types.KDE instance Control.DeepSeq.NFData Criterion.Types.KDE instance Data.Aeson.Types.FromJSON.FromJSON Criterion.Types.Report instance Data.Aeson.Types.ToJSON.ToJSON Criterion.Types.Report instance Data.Binary.Class.Binary Criterion.Types.Report instance Control.DeepSeq.NFData Criterion.Types.Report instance Data.Binary.Class.Binary Criterion.Types.DataRecord instance Control.DeepSeq.NFData Criterion.Types.DataRecord instance Data.Aeson.Types.FromJSON.FromJSON Criterion.Types.DataRecord instance Data.Aeson.Types.ToJSON.ToJSON Criterion.Types.DataRecord -- | Benchmark measurement code. module Criterion.Measurement -- | Set up time measurement. initializeTime :: IO () -- | Return the current wallclock time, in seconds since some arbitrary -- time. -- -- You must call initializeTime once before calling this -- function! getTime :: IO Double -- | Return the amount of elapsed CPU time, combining user and kernel -- (system) time into a single measure. getCPUTime :: IO Double -- | Read the CPU cycle counter. 
getCycles :: IO Word64 -- | Try to get GC statistics, bearing in mind that the GHC runtime will -- throw an exception if statistics collection was not enabled using -- "+RTS -T". getGCStatistics :: IO (Maybe GCStatistics) -- | Statistics about memory usage and the garbage collector. Apart from -- gcStatsCurrentBytesUsed and gcStatsCurrentBytesSlop all -- are cumulative values since the program started. -- -- GCStatistics is cargo-culted from the GCStats data type -- that GHC.Stats has. Since GCStats was marked as -- deprecated and will be removed in GHC 8.4, we use GCStatistics -- to provide a backwards-compatible view of GC statistics. data GCStatistics GCStatistics :: !Int64 -> !Int64 -> !Int64 -> !Int64 -> !Int64 -> !Int64 -> !Int64 -> !Int64 -> !Int64 -> !Int64 -> !Double -> !Double -> !Double -> !Double -> !Double -> !Double -> GCStatistics -- | Total number of bytes allocated [gcStatsBytesAllocated] :: GCStatistics -> !Int64 -- | Number of garbage collections performed (any generation, major and -- minor) [gcStatsNumGcs] :: GCStatistics -> !Int64 -- | Maximum number of live bytes seen so far [gcStatsMaxBytesUsed] :: GCStatistics -> !Int64 -- | Number of byte usage samples taken, or equivalently the number of -- major GCs performed. [gcStatsNumByteUsageSamples] :: GCStatistics -> !Int64 -- | Sum of all byte usage samples, can be used with -- gcStatsNumByteUsageSamples to calculate averages with arbitrary -- weighting (if you are sampling this record multiple times). 
[gcStatsCumulativeBytesUsed] :: GCStatistics -> !Int64 -- | Number of bytes copied during GC [gcStatsBytesCopied] :: GCStatistics -> !Int64 -- | Number of live bytes at the end of the last major GC [gcStatsCurrentBytesUsed] :: GCStatistics -> !Int64 -- | Current number of bytes lost to slop [gcStatsCurrentBytesSlop] :: GCStatistics -> !Int64 -- | Maximum number of bytes lost to slop at any one time so far [gcStatsMaxBytesSlop] :: GCStatistics -> !Int64 -- | Maximum number of megabytes allocated [gcStatsPeakMegabytesAllocated] :: GCStatistics -> !Int64 -- | CPU time spent running mutator threads. This does not include any -- profiling overhead or initialization. [gcStatsMutatorCpuSeconds] :: GCStatistics -> !Double -- | Wall clock time spent running mutator threads. This does not include -- initialization. [gcStatsMutatorWallSeconds] :: GCStatistics -> !Double -- | CPU time spent running GC [gcStatsGcCpuSeconds] :: GCStatistics -> !Double -- | Wall clock time spent running GC [gcStatsGcWallSeconds] :: GCStatistics -> !Double -- | Total CPU time elapsed since program start [gcStatsCpuSeconds] :: GCStatistics -> !Double -- | Total wall clock time elapsed since start [gcStatsWallSeconds] :: GCStatistics -> !Double -- | Convert a number of seconds to a string. The string will consist of -- four decimal places, followed by a short description of the time -- units. secs :: Double -> String -- | Measure the execution of a benchmark a given number of times. measure :: Benchmarkable -> Int64 -> IO (Measured, Double) -- | Run a single benchmark, and return measurements collected while -- executing it, along with the amount of time the measurement process -- took. runBenchmark :: Benchmarkable -> Double -> IO (Vector Measured, Double) runBenchmarkable :: Benchmarkable -> Int64 -> (a -> a -> a) -> (IO () -> IO a) -> IO a runBenchmarkable_ :: Benchmarkable -> Int64 -> IO () -- | An empty structure. 
measured :: Measured -- | Apply the difference between two sets of GC statistics to a -- measurement. applyGCStatistics :: Maybe GCStatistics -> Maybe GCStatistics -> Measured -> Measured -- | The amount of time a benchmark must run for in order for us to have -- some trust in the raw measurement. -- -- We set this threshold so that we can generate enough data to later -- perform meaningful statistical analyses. -- -- The threshold is 30 milliseconds. One use of runBenchmark must -- accumulate more than 300 milliseconds of total measurements above this -- threshold before it will finish. threshold :: Double -- | Try to get GC statistics, bearing in mind that the GHC runtime will -- throw an exception if statistics collection was not enabled using -- "+RTS -T". -- | Deprecated: GCStats has been deprecated in GHC 8.2. As a -- consequence,getGCStats has also been deprecated in favor of -- getGCStatistics.getGCStats will be removed in the next major criterion -- release. getGCStats :: IO (Maybe GCStats) -- | Apply the difference between two sets of GC statistics to a -- measurement. -- | Deprecated: GCStats has been deprecated in GHC 8.2. As a -- consequence,applyGCStats has also been deprecated in favor of -- applyGCStatistics.applyGCStats will be removed in the next major -- criterion release. applyGCStats :: Maybe GCStats -> Maybe GCStats -> Measured -> Measured instance GHC.Generics.Generic Criterion.Measurement.GCStatistics instance Data.Data.Data Criterion.Measurement.GCStatistics instance GHC.Show.Show Criterion.Measurement.GCStatistics instance GHC.Read.Read Criterion.Measurement.GCStatistics instance GHC.Classes.Eq Criterion.Measurement.GCStatistics -- | The environment in which most criterion code executes. module Criterion.Monad -- | The monad in which most criterion code executes. data Criterion a -- | Run a Criterion action with the given Config. withConfig :: Config -> Criterion a -> IO a -- | Return a random number generator, creating one if necessary. 
-- -- This is not currently thread-safe, but in a harmless way (we might -- call createSystemRandom more than once if multiple threads -- race). getGen :: Criterion GenIO -- | Return an estimate of the measurement overhead. getOverhead :: Criterion Double -- | Reporting functions. module Criterion.Report -- | Format a series of Report values using the given Mustache -- template. formatReport :: [Report] -> Text -> IO Text -- | Write out a series of Report values to a single file, if -- configured to do so. report :: [Report] -> Criterion () -- | Trim long flat tails from a KDE plot. tidyTails :: KDE -> KDE -- | A problem arose with a template. data TemplateException -- | The template could not be found. TemplateNotFound :: FilePath -> TemplateException -- | Load a Mustache template file. -- -- If the name is an absolute or relative path, the search path is -- not used, and the name is treated as a literal path. -- -- This function throws a TemplateException if the template could -- not be found, or an IOException if no template could be loaded. loadTemplate :: [FilePath] -> FilePath -> IO Text -- | Attempt to include the contents of a file based on a search path. -- Returns empty if the search fails or the file could not be -- read. -- -- Intended for preprocessing Mustache files, e.g. replacing sections -- --
-- {{#include}}file.txt{{/include}}
--
--
-- with file contents.
includeFile :: (MonadIO m) => [FilePath] -> FilePath -> m Text
-- | Return the path to the template and other files used for generating
-- reports.
getTemplateDir :: IO FilePath
-- | Render the elements of a vector.
--
-- It will substitute each value in the vector for x in the
-- following Mustache template:
--
--
-- {{#foo}}
-- {{x}}
-- {{/foo}}
--
vector :: (Vector v a, ToJSON a) => Text -> v a -> Value
-- | Render the elements of two vectors.
vector2 :: (Vector v a, Vector v b, ToJSON a, ToJSON b) => Text -> Text -> v a -> v b -> Value
instance GHC.Generics.Generic Criterion.Report.TemplateException
instance Data.Data.Data Criterion.Report.TemplateException
instance GHC.Show.Show Criterion.Report.TemplateException
instance GHC.Read.Read Criterion.Report.TemplateException
instance GHC.Classes.Eq Criterion.Report.TemplateException
instance GHC.Exception.Exception Criterion.Report.TemplateException
-- | Input and output actions.
module Criterion.IO.Printf
-- | An internal class that acts like Printf/HPrintf.
--
-- The implementation is visible to the rest of the program, but the
-- details of the class are not.
class CritHPrintfType a
-- | Print a "normal" note.
note :: (CritHPrintfType r) => String -> r
-- | Print an error message.
printError :: (CritHPrintfType r) => String -> r
-- | Print verbose output.
prolix :: (CritHPrintfType r) => String -> r
-- | Write a record to a CSV file.
writeCsv :: ToRecord a => a -> Criterion ()
instance Criterion.IO.Printf.CritHPrintfType (Criterion.Monad.Internal.Criterion a)
instance Criterion.IO.Printf.CritHPrintfType (GHC.Types.IO a)
instance (Criterion.IO.Printf.CritHPrintfType r, Text.Printf.PrintfArg a) => Criterion.IO.Printf.CritHPrintfType (a -> r)
-- | Input and output actions.
module Criterion.IO
-- | The header identifies a criterion data file. This contains version
-- information; there is no expectation of cross-version compatibility.
header :: ByteString
-- | The magic string we expect to start off the header.
headerRoot :: String
-- | The current version of criterion, encoded into a string that is used
-- in files.
critVersion :: String
-- | Read all records from the given Handle.
hGetRecords :: Binary a => Handle -> IO (Either String [a])
-- | Write records to the given Handle.
hPutRecords :: Binary a => Handle -> [a] -> IO ()
-- | Read all records from the given file.
readRecords :: Binary a => FilePath -> IO (Either String [a])
-- | Write records to the given file.
writeRecords :: Binary a => FilePath -> [a] -> IO ()
-- | On disk we store (name,version,reports), where version is the
-- version of Criterion used to generate the file.
type ReportFileContents = (String, String, [Report])
-- | Alternative file IO with JSON instances. Read a list of reports from a
-- .json file produced by criterion.
--
-- If the version does not match exactly, this issues a warning.
readJSONReports :: FilePath -> IO (Either String ReportFileContents)
-- | Write a list of reports to a JSON file. Includes a header, which
-- includes the current Criterion version number. This should be the
-- inverse of readJSONReports.
writeJSONReports :: FilePath -> [Report] -> IO ()
-- | Analysis code for benchmarks.
module Criterion.Analysis
-- | Outliers from sample data, calculated using the boxplot technique.
data Outliers
Outliers :: !Int64 -> !Int64 -> !Int64 -> !Int64 -> !Int64 -> Outliers
[samplesSeen] :: Outliers -> !Int64
-- | More than 3 times the interquartile range (IQR) below the first
-- quartile.
[lowSevere] :: Outliers -> !Int64
-- | Between 1.5 and 3 times the IQR below the first quartile.
[lowMild] :: Outliers -> !Int64
-- | Between 1.5 and 3 times the IQR above the third quartile.
[highMild] :: Outliers -> !Int64
-- | More than 3 times the IQR above the third quartile.
[highSevere] :: Outliers -> !Int64
-- | A description of the extent to which outliers in the sample data
-- affect the sample mean and standard deviation.
data OutlierEffect
-- | Less than 1% effect.
Unaffected :: OutlierEffect
-- | Between 1% and 10%.
Slight :: OutlierEffect
-- | Between 10% and 50%.
Moderate :: OutlierEffect
-- | Above 50% (i.e. measurements are useless).
Severe :: OutlierEffect
-- | Analysis of the extent to which outliers in a sample affect its
-- standard deviation (and to some extent, its mean).
data OutlierVariance
OutlierVariance :: OutlierEffect -> String -> Double -> OutlierVariance
-- | Qualitative description of effect.
[ovEffect] :: OutlierVariance -> OutlierEffect
-- | Brief textual description of effect.
[ovDesc] :: OutlierVariance -> String
-- | Quantitative description of effect (a fraction between 0 and 1).
[ovFraction] :: OutlierVariance -> Double
-- | Result of a bootstrap analysis of a non-parametric sample.
data SampleAnalysis
SampleAnalysis :: [Regression] -> Double -> Estimate ConfInt Double -> Estimate ConfInt Double -> OutlierVariance -> SampleAnalysis
-- | Estimates calculated via linear regression.
[anRegress] :: SampleAnalysis -> [Regression]
-- | Estimated measurement overhead, in seconds. Estimation is performed
-- via linear regression.
[anOverhead] :: SampleAnalysis -> Double
-- | Estimated mean.
[anMean] :: SampleAnalysis -> Estimate ConfInt Double
-- | Estimated standard deviation.
[anStdDev] :: SampleAnalysis -> Estimate ConfInt Double
-- | Description of the effects of outliers on the estimated variance.
[anOutlierVar] :: SampleAnalysis -> OutlierVariance
-- | Perform an analysis of a measurement.
analyseSample :: Int -> String -> Vector Measured -> ExceptT String Criterion Report
-- | Multiply the Estimates in an analysis by the given value,
-- using scale.
scale :: Double -> SampleAnalysis -> SampleAnalysis
-- | Display the mean of a Sample, and characterise the outliers
-- present in the sample.
analyseMean :: Sample -> Int -> Criterion Double
-- | Count the total number of outliers in a sample.
countOutliers :: Outliers -> Int64
-- | Classify outliers in a data set, using the boxplot technique.
classifyOutliers :: Sample -> Outliers
-- | Display a report of the Outliers present in a Sample.
noteOutliers :: Outliers -> Criterion ()
-- | Compute the extent to which outliers in the sample data affect the
-- sample mean and standard deviation.
outlierVariance :: Estimate ConfInt Double -> Estimate ConfInt Double -> Double -> OutlierVariance
-- | Given a list of accessor names (see measureKeys), return either
-- a mapping from accessor name to function or an error message if any
-- names are wrong.
resolveAccessors :: [String] -> Either String [(String, Measured -> Maybe Double)]
-- | Given predictor and responder names, do some basic validation, then
-- hand back the relevant accessors.
validateAccessors :: [String] -> String -> Either String [(String, Measured -> Maybe Double)]
-- | Regress the given predictors against the responder.
--
-- Errors may be returned under various circumstances, such as invalid
-- names or lack of needed data.
--
-- See olsRegress for details of the regression performed.
regress :: GenIO -> [String] -> String -> Vector Measured -> ExceptT String Criterion Regression
-- | Core benchmarking code.
module Criterion.Internal
-- | Run, and analyse, one or more benchmarks.
runAndAnalyse :: (String -> Bool) -> Benchmark -> Criterion ()
-- | Run a single benchmark and analyse its performance.
runAndAnalyseOne :: Int -> String -> Benchmarkable -> Criterion DataRecord
-- | Run a single benchmark.
runOne :: Int -> String -> Benchmarkable -> Criterion DataRecord
-- | Run a benchmark without analysing its performance.
runFixedIters :: Int64 -> (String -> Bool) -> Benchmark -> Criterion ()
-- | Benchmarking command-line configuration.
module Criterion.Main.Options
-- | Execution mode for a benchmark program.
data Mode
-- | List all benchmarks.
List :: Mode
-- | Print the version.
Version :: Mode
-- | Run the given benchmarks, without collecting or analysing performance
-- numbers.
RunIters :: Config -> Int64 -> MatchType -> [String] -> Mode
-- | Run and analyse the given benchmarks.
Run :: Config -> MatchType -> [String] -> Mode
-- | How to match a benchmark name.
data MatchType
-- | Match by prefix. For example, a prefix of "foo" will match
-- "foobar".
Prefix :: MatchType
-- | Match by Unix-style glob pattern.
Glob :: MatchType
-- | Match by searching given substring in benchmark paths.
Pattern :: MatchType
-- | Same as Pattern, but case insensitive.
IPattern :: MatchType
-- | Default benchmarking configuration.
defaultConfig :: Config
-- | Parse a command line.
parseWith :: Config -> Parser Mode
-- | Parse a configuration.
config :: Config -> Parser Config
-- | Flesh out a command line parser.
describe :: Config -> ParserInfo Mode
-- | A string describing the version of this benchmark (really, the version
-- of criterion that was used to build it).
versionInfo :: String
instance GHC.Generics.Generic Criterion.Main.Options.Mode
instance Data.Data.Data Criterion.Main.Options.Mode
instance GHC.Show.Show Criterion.Main.Options.Mode
instance GHC.Read.Read Criterion.Main.Options.Mode
instance GHC.Classes.Eq Criterion.Main.Options.Mode
instance GHC.Generics.Generic Criterion.Main.Options.MatchType
instance Data.Data.Data Criterion.Main.Options.MatchType
instance GHC.Show.Show Criterion.Main.Options.MatchType
instance GHC.Read.Read Criterion.Main.Options.MatchType
instance GHC.Enum.Enum Criterion.Main.Options.MatchType
instance GHC.Enum.Bounded Criterion.Main.Options.MatchType
instance GHC.Classes.Ord Criterion.Main.Options.MatchType
instance GHC.Classes.Eq Criterion.Main.Options.MatchType
-- | Wrappers for compiling and running benchmarks quickly and easily. See
-- defaultMain below for an example.
module Criterion.Main
-- | A pure function or impure action that can be benchmarked. The
-- Int64 parameter indicates the number of times to run the given
-- function or action.
data Benchmarkable
-- | Specification of a collection of benchmarks and environments. A
-- benchmark may consist of:
--
-- -- setupEnv = do -- let small = replicate 1000 (1 :: Int) -- big <- map length . words <$> readFile "/usr/dict/words" -- return (small, big) -- -- main = defaultMain [ -- -- notice the lazy pattern match here! -- env setupEnv $ \ ~(small,big) -> bgroup "main" [ -- bgroup "small" [ -- bench "length" $ whnf length small -- , bench "length . filter" $ whnf (length . filter (==1)) small -- ] -- , bgroup "big" [ -- bench "length" $ whnf length big -- , bench "length . filter" $ whnf (length . filter (==1)) big -- ] -- ] ] ---- -- Discussion. The environment created in the example above is -- intentionally not ideal. As Haskell's scoping rules suggest, -- the variable big is in scope for the benchmarks that use only -- small. It would be better to create a separate environment -- for big, so that it will not be kept alive while the -- unrelated benchmarks are being run. env :: NFData env => IO env -> (env -> Benchmark) -> Benchmark -- | Same as env, but allows for an additional callback to clean -- up the environment. Resource clean up is exception safe, that is, it -- runs even if the Benchmark throws an exception. envWithCleanup :: NFData env => IO env -> (env -> IO a) -> (env -> Benchmark) -> Benchmark -- | Create a Benchmarkable where a fresh environment is allocated for -- every batch of runs of the benchmarkable. -- -- The environment is evaluated to normal form before the benchmark is -- run. -- -- When using whnf, whnfIO, etc. Criterion creates a -- Benchmarkable which runs a batch of N repeat runs of -- that expression. Criterion may run any number of these batches to get -- accurate measurements. Environments created by env and -- envWithCleanup, are shared across all these batches of runs. 
-- -- This is fine for simple benchmarks on static input, but when -- benchmarking IO operations where these operations can modify (and -- especially grow) the environment this means that later batches might -- have their accuracy affected due to, for example, longer -- garbage collection pauses. -- -- An example: Suppose we want to benchmark writing to a Chan, if we -- allocate the Chan using environment and our benchmark consists of -- writeChan env (), the contents and thus size of the Chan will -- grow with every repeat. If Criterion runs 1,000 batches of 1,000 -- repeats, the result is that the channel will have 999,000 items in it -- by the time the last batch is run. Since GHC GC has to copy the live -- set for every major GC this means our last set of writes will suffer a -- lot of noise from the previous repeats. -- -- By allocating a fresh environment for every batch of runs this -- function should eliminate this effect. perBatchEnv :: (NFData env, NFData b) => (Int64 -> IO env) -> (env -> IO b) -> Benchmarkable -- | Same as perBatchEnv, but allows for an additional callback -- to clean up the environment. Resource clean up is exception safe, that -- is, it runs even if the Benchmark throws an exception. perBatchEnvWithCleanup :: (NFData env, NFData b) => (Int64 -> IO env) -> (Int64 -> env -> IO ()) -> (env -> IO b) -> Benchmarkable -- | Create a Benchmarkable where a fresh environment is allocated for -- every run of the operation to benchmark. This is useful for -- benchmarking mutable operations that need a fresh environment, such as -- sorting a mutable Vector. -- -- As with env and perBatchEnv the environment is evaluated -- to normal form before the benchmark is run. -- -- This introduces extra noise and results in reduced accuracy compared to -- other Criterion benchmarks. But allows easier benchmarking for mutable -- operations than was previously possible. 
perRunEnv :: (NFData env, NFData b) => IO env -> (env -> IO b) -> Benchmarkable -- | Same as perRunEnv, but allows for an additional callback to -- clean up the environment. Resource clean up is exception safe, that -- is, it runs even if the Benchmark throws an exception. perRunEnvWithCleanup :: (NFData env, NFData b) => IO env -> (env -> IO ()) -> (env -> IO b) -> Benchmarkable -- | Construct a Benchmarkable value from an impure action, where -- the Int64 parameter indicates the number of times to run the -- action. toBenchmarkable :: (Int64 -> IO ()) -> Benchmarkable -- | Create a single benchmark. bench :: String -> Benchmarkable -> Benchmark -- | Group several benchmarks together under a common name. bgroup :: String -> [Benchmark] -> Benchmark -- | Apply an argument to a function, and evaluate the result to normal -- form (NF). nf :: NFData b => (a -> b) -> a -> Benchmarkable -- | Apply an argument to a function, and evaluate the result to weak head -- normal form (WHNF). whnf :: (a -> b) -> a -> Benchmarkable -- | Perform an action, then evaluate its result to normal form. This is -- particularly useful for forcing a lazy IO action to be -- completely performed. nfIO :: NFData a => IO a -> Benchmarkable -- | Perform an action, then evaluate its result to weak head normal form -- (WHNF). This is useful for forcing an IO action whose result is -- an expression to be evaluated down to a more useful value. whnfIO :: IO a -> Benchmarkable -- | An entry point that can be used as a main function. -- --
-- import Criterion.Main -- -- fib :: Int -> Int -- fib 0 = 0 -- fib 1 = 1 -- fib n = fib (n-1) + fib (n-2) -- -- main = defaultMain [ -- bgroup "fib" [ bench "10" $ whnf fib 10 -- , bench "35" $ whnf fib 35 -- , bench "37" $ whnf fib 37 -- ] -- ] --defaultMain :: [Benchmark] -> IO () -- | An entry point that can be used as a main function, with -- configurable defaults. -- -- Example: -- --
-- import Criterion.Main.Options
-- import Criterion.Main
--
-- myConfig = defaultConfig {
-- -- Do not GC between runs.
-- forceGC = False
-- }
--
-- main = defaultMainWith myConfig [
-- bench "fib 30" $ whnf fib 30
-- ]
--
--
-- If you save the above example as "Fib.hs", you should be able
-- to compile it as follows:
--
-- -- ghc -O --make Fib ---- -- Run "Fib --help" on the command line to get a list of command -- line options. defaultMainWith :: Config -> [Benchmark] -> IO () -- | Default benchmarking configuration. defaultConfig :: Config -- | Create a function that can tell if a name given on the command line -- matches a benchmark. makeMatcher :: MatchType -> [String] -> Either String (String -> Bool) -- | Run a set of Benchmarks with the given Mode. -- -- This can be useful if you have a Mode from some other source -- (e.g. from one in your benchmark driver's command-line parser). runMode :: Mode -> [Benchmark] -> IO () -- | Core benchmarking code. module Criterion -- | A pure function or impure action that can be benchmarked. The -- Int64 parameter indicates the number of times to run the given -- function or action. data Benchmarkable -- | Specification of a collection of benchmarks and environments. A -- benchmark may consist of: -- --
-- setupEnv = do -- let small = replicate 1000 (1 :: Int) -- big <- map length . words <$> readFile "/usr/dict/words" -- return (small, big) -- -- main = defaultMain [ -- -- notice the lazy pattern match here! -- env setupEnv $ \ ~(small,big) -> bgroup "main" [ -- bgroup "small" [ -- bench "length" $ whnf length small -- , bench "length . filter" $ whnf (length . filter (==1)) small -- ] -- , bgroup "big" [ -- bench "length" $ whnf length big -- , bench "length . filter" $ whnf (length . filter (==1)) big -- ] -- ] ] ---- -- Discussion. The environment created in the example above is -- intentionally not ideal. As Haskell's scoping rules suggest, -- the variable big is in scope for the benchmarks that use only -- small. It would be better to create a separate environment -- for big, so that it will not be kept alive while the -- unrelated benchmarks are being run. env :: NFData env => IO env -> (env -> Benchmark) -> Benchmark -- | Same as env, but allows for an additional callback to clean -- up the environment. Resource clean up is exception safe, that is, it -- runs even if the Benchmark throws an exception. envWithCleanup :: NFData env => IO env -> (env -> IO a) -> (env -> Benchmark) -> Benchmark -- | Create a Benchmarkable where a fresh environment is allocated for -- every batch of runs of the benchmarkable. -- -- The environment is evaluated to normal form before the benchmark is -- run. -- -- When using whnf, whnfIO, etc. Criterion creates a -- Benchmarkable which runs a batch of N repeat runs of -- that expression. Criterion may run any number of these batches to get -- accurate measurements. Environments created by env and -- envWithCleanup, are shared across all these batches of runs. 
-- -- This is fine for simple benchmarks on static input, but when -- benchmarking IO operations where these operations can modify (and -- especially grow) the environment this means that later batches might -- have their accuracy affected due to, for example, longer -- garbage collection pauses. -- -- An example: Suppose we want to benchmark writing to a Chan, if we -- allocate the Chan using environment and our benchmark consists of -- writeChan env (), the contents and thus size of the Chan will -- grow with every repeat. If Criterion runs 1,000 batches of 1,000 -- repeats, the result is that the channel will have 999,000 items in it -- by the time the last batch is run. Since GHC GC has to copy the live -- set for every major GC this means our last set of writes will suffer a -- lot of noise from the previous repeats. -- -- By allocating a fresh environment for every batch of runs this -- function should eliminate this effect. perBatchEnv :: (NFData env, NFData b) => (Int64 -> IO env) -> (env -> IO b) -> Benchmarkable -- | Same as perBatchEnv, but allows for an additional callback -- to clean up the environment. Resource clean up is exception safe, that -- is, it runs even if the Benchmark throws an exception. perBatchEnvWithCleanup :: (NFData env, NFData b) => (Int64 -> IO env) -> (Int64 -> env -> IO ()) -> (env -> IO b) -> Benchmarkable -- | Create a Benchmarkable where a fresh environment is allocated for -- every run of the operation to benchmark. This is useful for -- benchmarking mutable operations that need a fresh environment, such as -- sorting a mutable Vector. -- -- As with env and perBatchEnv the environment is evaluated -- to normal form before the benchmark is run. -- -- This introduces extra noise and results in reduced accuracy compared to -- other Criterion benchmarks. But allows easier benchmarking for mutable -- operations than was previously possible. 
perRunEnv :: (NFData env, NFData b) => IO env -> (env -> IO b) -> Benchmarkable -- | Same as perRunEnv, but allows for an additional callback to -- clean up the environment. Resource clean up is exception safe, that -- is, it runs even if the Benchmark throws an exception. perRunEnvWithCleanup :: (NFData env, NFData b) => IO env -> (env -> IO ()) -> (env -> IO b) -> Benchmarkable -- | Construct a Benchmarkable value from an impure action, where -- the Int64 parameter indicates the number of times to run the -- action. toBenchmarkable :: (Int64 -> IO ()) -> Benchmarkable -- | Create a single benchmark. bench :: String -> Benchmarkable -> Benchmark -- | Group several benchmarks together under a common name. bgroup :: String -> [Benchmark] -> Benchmark -- | Apply an argument to a function, and evaluate the result to normal -- form (NF). nf :: NFData b => (a -> b) -> a -> Benchmarkable -- | Apply an argument to a function, and evaluate the result to weak head -- normal form (WHNF). whnf :: (a -> b) -> a -> Benchmarkable -- | Perform an action, then evaluate its result to normal form. This is -- particularly useful for forcing a lazy IO action to be -- completely performed. nfIO :: NFData a => IO a -> Benchmarkable -- | Perform an action, then evaluate its result to weak head normal form -- (WHNF). This is useful for forcing an IO action whose result is -- an expression to be evaluated down to a more useful value. whnfIO :: IO a -> Benchmarkable -- | Run a benchmark interactively, and analyse its performance. benchmark :: Benchmarkable -> IO () -- | Run a benchmark interactively, and analyse its performance. benchmarkWith :: Config -> Benchmarkable -> IO () -- | Run a benchmark interactively, analyse its performance, and return the -- analysis. benchmark' :: Benchmarkable -> IO Report -- | Run a benchmark interactively, analyse its performance, and return the -- analysis. benchmarkWith' :: Config -> Benchmarkable -> IO Report