               ; verbose (-v: yes -v-: no)
-v -
               ; keep intermediary files (-x: yes -x-: no)
-x -
               ; flex rules (input file, binary format)
;-b (not specified; no actions are performed with already created flex rules.)
               ; Word list (Optional if -b is specified. Otherwise N/A) (-I filename)
;-I  (N/A)
               ; Output: lemmas of the words in the -I input (-O filename)
;-O  (N/A)
               ; word/lemma list
-i greek3.ph
               ; extra file name affix
-e ziggurat
               ; suffix only (-s: yes -s-: no)
-s -
               ; make rules with infixes less prevalent (-A: yes -A-: no)
-A -
               ; columns (1 or F or W = word, 2 or B or L = lemma, 3 or T = tags, 0 or O = other)
-n FBO
               ; max recursion depth when attempting to create a candidate rule
-Q 1
               ; flex rules (output, binary format, can be left unspecified)
;-o (Not specified, autogenerated)
               ; temp dir (including separator at end!)
-j tmp/
               ; penalties to decide which rule survives (4 or 6 floating point numbers: R=>R;W=>R;R=>W;W=>W[;R=>N/A;W=>N/A], where R = #right cases, W = #wrong cases, N/A = #not applicable cases, and each pair reads previous success state => success state after rule application; see the sketch after the option list)
-D 0.039035;-0.441248;0.889026;0.066674;0.039309;0.086126;
               ; compute parms (-p: yes -p-: no)
-p 
               ; expected optimal pruning threshold (only effective in combination with -XW)
-C -1
               ; tree penalty (-XC: constant; -XD: more support is better; -XE: higher entropy is better; -XW: fewer pattern characters other than wildcards is better)
-X C
               ; current parameters (-P filename)
-P parms.txt
               ; best parameters (-B filename)
-B best_ziggurat.txt
               ; start training with a minimal fraction of the training pairs (-Ln: 0.0 < n <= 1.0); see the schedule sketch after the option list
-L 0.017709
               ; end training with a maximal fraction of the training pairs (-Hn: 0.0 < n <= 1.0)
-H 1.000000
               ; number of differently sized fractions of training data (natural number)
-K 20
               ; number of training iterations with the same fraction of training data when the fraction is minimal (positive number)
-N 100.000000
               ; number of training iterations with the same fraction of training data when the fraction is maximal (positive number)
-M 10.000000
               ; competition function (deprecated)
;-f  (N/A)
               ; redo training after homographs are removed for the next round (-R: yes -R-: no)
;-R - (N/A)
               ; max. pruning threshold to evaluate
-c 5
               ; test with the training data (-T: yes -T-: no)
-T 
               ; test with data not used for training (-t: yes -t-: no)
-t 
               ; create flexrules using full training set (-F: yes -F-: no)
-F 
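;
; The six -D weights above can be read as penalties in a linear scoring function for
; competing candidate rules: each weight multiplies the number of training pairs whose
; success state changes in the corresponding way when the candidate rule is applied,
; and the candidate with the lowest total penalty survives. The exact competition
; function is not documented in this file, so the Python sketch below is only an
; illustration of that reading, with made-up transition counts:
;
;   # Illustrative only: assumes the -D values act as a weighted sum over
;   # state-transition counts and that the lowest total penalty wins.
;   WEIGHTS = {                      # the -D values given above
;       "R=>R": 0.039035, "W=>R": -0.441248, "R=>W": 0.889026,
;       "W=>W": 0.066674, "R=>N/A": 0.039309, "W=>N/A": 0.086126,
;   }
;
;   def penalty(counts):
;       """Total penalty of one candidate rule, given its transition counts."""
;       return sum(WEIGHTS[t] * n for t, n in counts.items())
;
;   # Hypothetical candidates: rule_a turns more wrong lemmas into right ones,
;   # rule_b breaks fewer right ones; under these weights rule_a survives.
;   rule_a = {"R=>R": 120, "W=>R": 30, "R=>W": 5, "W=>W": 10, "R=>N/A": 0, "W=>N/A": 2}
;   rule_b = {"R=>R": 125, "W=>R": 20, "R=>W": 1, "W=>W": 18, "R=>N/A": 0, "W=>N/A": 3}
;   survivor = "rule_a" if penalty(rule_a) < penalty(rule_b) else "rule_b"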
               ; Number of clusters found in word/lemma list: 45779
               ; Number of lines found in word/lemma list:    564700
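;
; Together, the -L, -H, -K, -N and -M options define the training schedule: -K
; differently sized portions of the training data are tried, growing from the -L
; fraction to the -H fraction, and the number of repetitions per portion shrinks from
; -N (smallest portion) to -M (largest portion). How affixtrain spaces the fractions
; and interpolates the repetition counts is not stated here; the Python sketch below
; assumes geometric interpolation for both, purely as an illustration:
;
;   # Assumed geometric interpolation between the endpoints; illustrative only.
;   L, H, K = 0.017709, 1.0, 20          # -L, -H, -K from the options above
;   N_REP, M_REP = 100.0, 10.0           # -N, -M from the options above
;
;   fractions = [L * (H / L) ** (i / (K - 1)) for i in range(K)]
;   repetitions = [round(N_REP * (M_REP / N_REP) ** (i / (K - 1))) for i in range(K)]
;   # fractions runs from 0.017709 up to 1.0; repetitions runs from 100 down to 10.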

; Evaluation:
; -----------
; Lemmatization results for all data in the training set.
; For pruning threshold 0 there can be no errors (diff% is 0).

; prun. thrshld.              0              1              2              3              4              5 
; rules            59706.000000   34411.000000   10938.000000    6868.000000    5244.000000    4355.000000 
; rules%              10.573048       6.093678       1.936958       1.216221       0.928635       0.771206 
; same%               94.471755      91.312910      88.544714      87.492651      86.817425      86.325837 
; ambi1%               2.752612       2.794404       2.472995       2.376837       2.342483       2.268284 
; ambi2%               2.752612       2.647600       2.198335       2.096511       2.060740       2.007615 
; ambi3%               0.023021       0.014167       0.003188       0.000177       0.000000       0.000000 
; diff%                0.000000       3.230919       6.780769       8.033823       8.779352       9.398265 
; same%stdev           0.000000       0.000000       0.000000       0.000000       0.000000       0.000000 
; ambi1%stdev          0.000000       0.000000       0.000000       0.000000       0.000000       0.000000 
; ambi2%stdev          0.000000       0.000000       0.000000       0.000000       0.000000       0.000000 
; ambi3%stdev          0.000000       0.000000       0.000000       0.000000       0.000000       0.000000 
; diff%stdev           0.000000       0.000000       0.000000       0.000000       0.000000       0.000000 
; 
; Evaluation of prediction of ambiguity (whether a word has more than one possible lemma)
;---------------------------------------------------------------------------------------
; amb.rules%           5.528245       5.593058       4.934478       4.730653       4.682486       4.537808 
; false_amb%           0.000000       0.721091       0.761289       0.718435       0.760581       0.687622 
; false_not_amb%       0.000000       0.656278       1.355056       1.516026       1.606340       1.678059 
; true_amb%            5.528245       4.871967       4.173189       4.012219       3.921905       3.850186 
; true_not_amb%       94.471755      93.750664      93.710466      93.753320      93.711174      93.784133 
; precision            1.000000       0.771595       0.732683       0.736310       0.720532       0.736817 
; recall               1.000000       0.881286       0.754885       0.725767       0.709430       0.696457 
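;
; The precision and recall rows here (and in the matching table for held-out data
; further down) follow from the rows above them: recall is
; true_amb / (true_amb + false_not_amb), and the reported precision values are
; reproduced by true_amb / (true_amb + 2 * false_amb), i.e. a falsely predicted
; ambiguity appears to be counted twice. The factor 2 is inferred from the numbers in
; this report, not from documentation. A quick check in Python for the pruning
; threshold 1 column:
;
;   true_amb, false_amb, false_not_amb = 4.871967, 0.721091, 0.656278
;   recall = true_amb / (true_amb + false_not_amb)        # 0.881286, as reported
;   precision = true_amb / (true_amb + 2 * false_amb)     # 0.771595, as reported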

; Evaluation:
; -----------
; Lemmatization results for data that is not part of the training data.

; prun. thrshld.              0              1              2              3              4              5 
; rules            58901.800000   33958.800000   10808.600000    6812.600000    5212.200000    4327.000000 
; rules%              10.578510       6.098855       1.941178       1.223514       0.936089       0.777111 
; same%               85.424004      85.619093      85.905394      85.596291      85.277053      84.935012 
; ambi1%               2.564038       2.467760       2.265069       2.161190       2.123185       2.135853 
; ambi2%               2.082647       2.059844       1.976235       1.910360       1.882490       1.882490 
; ambi3%               0.005067       0.002534       0.000000       0.002534       0.000000       0.000000 
; diff%                9.924244       9.850769       9.853303      10.329626      10.717272      11.046644 
; same%stdev           0.826480       0.830439       0.933204       0.962539       1.002514       1.028687 
; ambi1%stdev          0.272754       0.204068       0.200208       0.201529       0.204925       0.199445 
; ambi2%stdev          0.186769       0.144487       0.188432       0.165448       0.170775       0.204926 
; ambi3%stdev          0.006812       0.005468       0.000000       0.005909       0.000000       0.000000 
; diff%stdev           0.884649       0.881438       0.878746       0.855822       0.883543       0.929821 
; 
; Evaluation of prediction of ambiguity (whether a word has more than one possible lemma)
;---------------------------------------------------------------------------------------
; amb.rules%           5.128075       4.938053       4.514936       4.345182       4.304644       4.279308 
; false_amb%           0.235628       0.220426       0.169753       0.136816       0.136816       0.152018 
; false_not_amb%       0.243229       0.233094       0.278700       0.304036       0.304036       0.304036 
; true_amb%            0.841166       0.851301       0.805696       0.780359       0.780359       0.780359 
; true_not_amb%       19.688870      19.704072      19.754744      19.787681      19.787681      19.772480 
; precision            0.640927       0.658824       0.703540       0.740385       0.740385       0.719626 
; recall               0.775701       0.785047       0.742991       0.719626       0.719626       0.719626 
; 
; Power law relating the number of rules in the decision tree to the number of examples in the training data
;----------------------------------------------------------------------------------------------------------
; #rules =        2.179*N^0.769  1.306*N^0.766  0.787*N^0.718  0.657*N^0.697  0.587*N^0.685  0.527*N^0.680 

; Postscriptum

; The number of rules can be estimated from the number of training examples by
; a power law. See the last line in the table above, which is based on 7
; different samples of the total available training data, varying in size
; from 1.54 % to 98.56 %.
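;
; As a sanity check, inserting the size of the full word/lemma list (564700 lines, see
; the count above the evaluation) into the fitted coefficients comes close to the rule
; counts actually produced on the full training set; this assumes that N in the
; formula is the number of word/lemma lines. In Python:
;
;   # Power laws from the table above: #rules ~ a * N**b, per pruning threshold.
;   fits = {0: (2.179, 0.769), 1: (1.306, 0.766), 5: (0.527, 0.680)}
;   N = 564700                           # lines in the word/lemma list
;   for threshold, (a, b) in fits.items():
;       print(threshold, round(a * N ** b))
;   # prints about 57700, 33300 and 4300 rules; the run on the full training set
;   # produced 59706, 34411 and 4355 rules at these pruning thresholds.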