-- | Wrappers for conveniently defining benchmarks and running them
-- from a @main@ function.
module Criterion.Main
    (
    -- * Types
      Benchmarkable(..)
    , Benchmark
    , Pure
    -- * Constructing benchmarks
    , bench
    , bgroup
    , nf
    , whnf
    , nfIO
    , whnfIO
    -- * Running benchmarks
    , defaultMain
    , defaultMainWith
    -- * Other useful code
    , defaultOptions
    , parseArgs
    ) where
import Control.Monad.Trans (liftIO)
import Criterion (runAndAnalyse)
import Criterion.Config
import Criterion.Environment (measureEnvironment)
import Criterion.IO (note, printError)
import Criterion.Monad (Criterion, withConfig)
import Criterion.Types (Benchmarkable(..), Benchmark(..), Pure, bench,
                        benchNames, bgroup, nf, nfIO, whnf, whnfIO)
import Data.List (isPrefixOf, sort)
import Data.Monoid (Monoid(..), Last(..))
import System.Console.GetOpt
import System.Environment (getArgs, getProgName)
import System.Exit (ExitCode(..), exitWith)
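
-- | Parse a confidence interval from the command line, given either as
-- a fraction (e.g. @0.95@) or as a percentage (e.g. @95%@).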
ci :: String -> IO Config
ci s = case reads s' of
         [(d,"%")] -> check (d/100)
         [(d,"")]  -> check d
         _         -> parseError "invalid confidence interval provided"
  where s' = case s of
               ('.':_) -> '0':s
               _       -> s
        check d | d <= 0 = parseError "confidence interval is negative"
                | d >= 1 = parseError "confidence interval is greater than 1"
                | otherwise = return mempty { cfgConfInterval = ljust d }
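
-- | Parse a positive number from the command line; @q@ names the
-- quantity being parsed, for use in error messages.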
pos :: (Num a, Ord a, Read a) =>
       String -> (Last a -> Config) -> String -> IO Config
pos q f s =
    case reads s of
      [(n,"")] | n > 0     -> return . f $ ljust n
               | otherwise -> parseError $ q ++ " must be positive"
      _                    -> parseError $ "invalid " ++ q ++ " provided"
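
-- | Lift a fixed 'Config' fragment into an option that takes no
-- argument.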
noArg :: Config -> ArgDescr (IO Config)
noArg = NoArg . return
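
-- | The standard options accepted on the command line.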
defaultOptions :: [OptDescr (IO Config)]
defaultOptions = [
   Option ['h','?'] ["help"] (noArg mempty { cfgPrintExit = Help })
          "print help, then exit"
 , Option ['G'] ["no-gc"] (noArg mempty { cfgPerformGC = ljust False })
          "do not collect garbage between iterations"
 , Option ['g'] ["gc"] (noArg mempty { cfgPerformGC = ljust True })
          "collect garbage between iterations"
 , Option ['I'] ["ci"] (ReqArg ci "CI")
          "bootstrap confidence interval"
 , Option ['l'] ["list"] (noArg mempty { cfgPrintExit = List })
          "print only a list of benchmark names"
 , Option ['o'] ["output"]
          (ReqArg (\t -> return $ mempty { cfgReport = ljust t }) "FILENAME")
          "report file to write to"
 , Option ['q'] ["quiet"] (noArg mempty { cfgVerbosity = ljust Quiet })
          "print less output"
 , Option [] ["resamples"]
           (ReqArg (pos "resample count" $ \n -> mempty { cfgResamples = n }) "N")
          "number of bootstrap resamples to perform"
 , Option ['s'] ["samples"]
          (ReqArg (pos "sample count" $ \n -> mempty { cfgSamples = n }) "N")
          "number of samples to collect"
 , Option ['t'] ["template"]
          (ReqArg (\t -> return $ mempty { cfgTemplate = ljust t }) "FILENAME")
          "template file to use"
 , Option ['u'] ["summary"]
           (ReqArg (\s -> return $ mempty { cfgSummaryFile = ljust s }) "FILENAME")
           "produce a summary CSV file of all results"
 , Option ['V'] ["version"] (noArg mempty { cfgPrintExit = Version })
          "display version, then exit"
 , Option ['v'] ["verbose"] (noArg mempty { cfgVerbosity = ljust Verbose })
          "print more output"
 ]
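
-- | Print the banner from the given configuration, falling back to a
-- placeholder message if none is set.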
printBanner :: Config -> IO ()
printBanner cfg = withConfig cfg $ 
    case cfgBanner cfg of
      Last (Just b) -> note "%s\n" b
      _             -> note "Hey, nobody told me what version I am!\n"
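
-- | Print a usage message built from the given options, then exit with
-- the given status code.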
printUsage :: [OptDescr (IO Config)] -> ExitCode -> IO a
printUsage options exitCode = do
  p <- getProgName
  putStr (usageInfo ("Usage: " ++ p ++ " [OPTIONS] [BENCHMARKS]") options)
  putStrLn "If no benchmark names are given, all are run\n\
           \Otherwise, benchmarks are run by prefix match"
  exitWith exitCode
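
-- | Parse command line arguments.  Options are merged onto the given
-- default 'Config'; the remaining arguments are returned as benchmark
-- name prefixes.  @--help@ and @--version@ cause an immediate exit.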
parseArgs :: Config -> [OptDescr (IO Config)] -> [String]
          -> IO (Config, [String])
parseArgs defCfg options args =
  case getOpt Permute options args of
    (_, _, (err:_)) -> parseError err
    (opts, rest, _) -> do
      cfg <- (mappend defCfg . mconcat) `fmap` sequence opts
      case cfgPrintExit cfg of
        Help ->    printBanner cfg >> printUsage options ExitSuccess
        Version -> printBanner cfg >> exitWith ExitSuccess
        _ ->       return (cfg, rest)
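
-- | An entry point that can be used as a @main@ function, running the
-- given benchmarks with the default configuration.
--
-- A minimal usage sketch (the @fib@ function is only a placeholder for
-- whatever you want to benchmark):
--
-- > import Criterion.Main
-- >
-- > fib :: Int -> Int
-- > fib 0 = 0
-- > fib 1 = 1
-- > fib n = fib (n-1) + fib (n-2)
-- >
-- > main :: IO ()
-- > main = defaultMain
-- >        [ bgroup "fib" [ bench "fib 10" $ whnf fib 10
-- >                       , bench "fib 30" $ whnf fib 30
-- >                       ]
-- >        ]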
defaultMain :: [Benchmark] -> IO ()
defaultMain = defaultMainWith defaultConfig (return ())
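
-- | An entry point that can be used as a @main@ function, with
-- configurable defaults.  The 'Criterion' action is run once, after
-- the timing environment is measured and before any benchmarks are
-- run, so it can be used for setup work.
--
-- A sketch of overriding part of the default configuration, using only
-- names from this module and "Criterion.Config":
--
-- > import Criterion.Config
-- > import Criterion.Main
-- >
-- > myConfig :: Config
-- > myConfig = defaultConfig { cfgPerformGC = ljust True }
-- >
-- > main :: IO ()
-- > main = defaultMainWith myConfig (return ())
-- >        [ bench "reverse" $ nf reverse [1..1000::Int] ]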
defaultMainWith :: Config
                -> Criterion () 
                -> [Benchmark]
                -> IO ()
defaultMainWith defCfg prep bs = do
  (cfg, args) <- parseArgs defCfg defaultOptions =<< getArgs
  withConfig cfg $
   if cfgPrintExit cfg == List
    then do
      _ <- note "Benchmarks:\n"
      mapM_ (note "  %s\n") (sort $ concatMap benchNames bs)
    else do
      case getLast $ cfgSummaryFile cfg of
        Just fn -> liftIO $ writeFile fn "Name,Mean,MeanLB,MeanUB,Stddev,StddevLB,StddevUB\n"
        Nothing -> return ()
      env <- measureEnvironment
      let shouldRun b = null args || any (`isPrefixOf` b) args
      prep
      runAndAnalyse shouldRun env $ BenchGroup "" bs
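
-- | Print an error message from a command line parsing failure,
-- together with a usage hint, then exit with status 64.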
parseError :: String -> IO a
parseError msg = do
  _ <- printError "Error: %s" msg
  _ <- printError "Run \"%s --help\" for usage information\n" =<< getProgName
  exitWith (ExitFailure 64)