From d02b54b2c58d2d71e29700bbedbb38768d6c9e35 Mon Sep 17 00:00:00 2001 From: mguetlein Date: Tue, 13 Dec 2011 11:20:04 +0100 Subject: add filtering of validation reports --- lib/ot_predictions.rb | 238 ++--------------------------- lib/prediction_data.rb | 287 +++++++++++++++++++++++++++++++++++ lib/predictions.rb | 49 ++---- lib/validation_db.rb | 10 +- report/plot_factory.rb | 132 ++++++++++++++-- report/report_content.rb | 6 + report/report_factory.rb | 34 +++-- report/report_service.rb | 10 +- report/statistical_test.rb | 22 +-- report/validation_access.rb | 25 ++- report/validation_data.rb | 26 ++-- test/test_examples_util.rb | 3 +- validation/validation_application.rb | 104 ++++++------- validation/validation_service.rb | 168 ++++++++++---------- 14 files changed, 658 insertions(+), 456 deletions(-) create mode 100644 lib/prediction_data.rb diff --git a/lib/ot_predictions.rb b/lib/ot_predictions.rb index cf0168e..3be845b 100755 --- a/lib/ot_predictions.rb +++ b/lib/ot_predictions.rb @@ -1,12 +1,17 @@ +require "lib/prediction_data.rb" require "lib/predictions.rb" module Lib class OTPredictions < Predictions - CHECK_VALUES = ENV['RACK_ENV'] =~ /debug|test/ - + def initialize(data, compounds=nil) + raise unless data.is_a?(Hash) + super(data) + @compounds = compounds + end + def identifier(instance_index) compound(instance_index) end @@ -15,234 +20,9 @@ module Lib @compounds[instance_index] end - def initialize( feature_type, test_dataset_uris, test_target_dataset_uris, - prediction_feature, prediction_dataset_uris, predicted_variables, predicted_confidences, - subjectid=nil, task=nil ) - - test_dataset_uris = [test_dataset_uris] unless test_dataset_uris.is_a?(Array) - test_target_dataset_uris = [test_target_dataset_uris] unless test_target_dataset_uris.is_a?(Array) - prediction_dataset_uris = [prediction_dataset_uris] unless prediction_dataset_uris.is_a?(Array) - predicted_variables = [predicted_variables] unless predicted_variables.is_a?(Array) - predicted_confidences = [predicted_confidences] unless predicted_confidences.is_a?(Array) - LOGGER.debug "loading prediction -- test-dataset: "+test_dataset_uris.inspect - LOGGER.debug "loading prediction -- test-target-datset: "+test_target_dataset_uris.inspect - LOGGER.debug "loading prediction -- prediction-dataset: "+prediction_dataset_uris.inspect - LOGGER.debug "loading prediction -- predicted_variable: "+predicted_variables.inspect - LOGGER.debug "loading prediction -- predicted_confidence: "+predicted_confidences.inspect - LOGGER.debug "loading prediction -- prediction_feature: "+prediction_feature.to_s - raise "prediction_feature missing" unless prediction_feature - - @compounds = [] - all_predicted_values = [] - all_actual_values = [] - all_confidence_values = [] - accept_values = nil - - if task - task_step = 100 / (test_dataset_uris.size*2 + 1) - task_status = 0 - end - - test_dataset_uris.size.times do |i| - - test_dataset_uri = test_dataset_uris[i] - test_target_dataset_uri = test_target_dataset_uris[i] - prediction_dataset_uri = prediction_dataset_uris[i] - predicted_variable = predicted_variables[i] - predicted_confidence = predicted_confidences[i] - - predicted_variable=prediction_feature if predicted_variable==nil - - test_dataset = Lib::DatasetCache.find test_dataset_uri,subjectid - raise "test dataset not found: '"+test_dataset_uri.to_s+"'" unless test_dataset - - if test_target_dataset_uri == nil || test_target_dataset_uri.strip.size==0 || test_target_dataset_uri==test_dataset_uri - test_target_dataset_uri = 
test_dataset_uri - test_target_dataset = test_dataset - raise "prediction_feature not found in test_dataset, specify a test_target_dataset\n"+ - "prediction_feature: '"+prediction_feature.to_s+"'\n"+ - "test_dataset: '"+test_target_dataset_uri.to_s+"'\n"+ - "available features are: "+test_target_dataset.features.inspect if test_target_dataset.features.keys.index(prediction_feature)==nil - else - test_target_dataset = Lib::DatasetCache.find test_target_dataset_uri,subjectid - raise "test target datset not found: '"+test_target_dataset_uri.to_s+"'" unless test_target_dataset - if CHECK_VALUES - test_dataset.compounds.each do |c| - raise "test compound not found on test class dataset "+c.to_s unless test_target_dataset.compounds.include?(c) - end - end - raise "prediction_feature not found in test_target_dataset\n"+ - "prediction_feature: '"+prediction_feature.to_s+"'\n"+ - "test_target_dataset: '"+test_target_dataset_uri.to_s+"'\n"+ - "available features are: "+test_target_dataset.features.inspect if test_target_dataset.features.keys.index(prediction_feature)==nil - end - - compounds = test_dataset.compounds - LOGGER.debug "test dataset size: "+compounds.size.to_s - raise "test dataset is empty "+test_dataset_uri.to_s unless compounds.size>0 - - if feature_type=="classification" - av = test_target_dataset.accept_values(prediction_feature) - raise "'"+OT.acceptValue.to_s+"' missing/invalid for feature '"+prediction_feature.to_s+"' in dataset '"+ - test_target_dataset_uri.to_s+"', acceptValues are: '"+av.inspect+"'" if av==nil or av.length<2 - if accept_values==nil - accept_values=av - else - raise "accept values (in folds) differ "+av.inspect+" != "+accept_values.inspect if av!=accept_values - end - end - - actual_values = [] - compounds.each do |c| - case feature_type - when "classification" - actual_values << classification_val(test_target_dataset, c, prediction_feature, accept_values) - when "regression" - actual_values << regression_val(test_target_dataset, c, prediction_feature) - end - end - task.progress( task_status += task_step ) if task # loaded actual values - - prediction_dataset = Lib::DatasetCache.find prediction_dataset_uri,subjectid - raise "prediction dataset not found: '"+prediction_dataset_uri.to_s+"'" unless prediction_dataset - - # allow missing prediction feature if there are no compounds in the prediction dataset - raise "predicted_variable not found in prediction_dataset\n"+ - "predicted_variable '"+predicted_variable.to_s+"'\n"+ - "prediction_dataset: '"+prediction_dataset_uri.to_s+"'\n"+ - "available features are: "+prediction_dataset.features.inspect if prediction_dataset.features.keys.index(predicted_variable)==nil and prediction_dataset.compounds.size>0 - raise "predicted_confidence not found in prediction_dataset\n"+ - "predicted_confidence '"+predicted_confidence.to_s+"'\n"+ - "prediction_dataset: '"+prediction_dataset_uri.to_s+"'\n"+ - "available features are: "+prediction_dataset.features.inspect if predicted_confidence and prediction_dataset.features.keys.index(predicted_confidence)==nil and prediction_dataset.compounds.size>0 - - raise "more predicted than test compounds, #test: "+compounds.size.to_s+" < #prediction: "+ - prediction_dataset.compounds.size.to_s+", test-dataset: "+test_dataset_uri.to_s+", prediction-dataset: "+ - prediction_dataset_uri if compounds.size < prediction_dataset.compounds.size - if CHECK_VALUES - prediction_dataset.compounds.each do |c| - raise "predicted compound not found in test dataset:\n"+c+"\ntest-compounds:\n"+ - 
compounds.collect{|c| c.to_s}.join("\n") if compounds.index(c)==nil - end - end - - predicted_values = [] - confidence_values = [] - count = 0 - compounds.each do |c| - if prediction_dataset.compounds.index(c)==nil - predicted_values << nil - confidence_values << nil - else - case feature_type - when "classification" - predicted_values << classification_val(prediction_dataset, c, predicted_variable, accept_values) - when "regression" - predicted_values << regression_val(prediction_dataset, c, predicted_variable) - end - if predicted_confidence - confidence_values << confidence_val(prediction_dataset, c, predicted_confidence) - else - confidence_values << nil - end - end - count += 1 - end - @compounds += compounds - all_predicted_values += predicted_values - all_actual_values += actual_values - all_confidence_values += confidence_values - - task.progress( task_status += task_step ) if task # loaded predicted values and confidence - end - - #sort according to confidence if available - if all_confidence_values.compact.size>0 - values = [] - all_predicted_values.size.times do |i| - values << [all_predicted_values[i], all_actual_values[i], all_confidence_values[i], @compounds[i]] - end - values = values.sort_by{ |v| v[2] || 0 }.reverse # sorting by confidence - all_predicted_values = [] - all_actual_values = [] - all_confidence_values = [] - @compounds = [] - values.each do |v| - all_predicted_values << v[0] - all_actual_values << v[1] - all_confidence_values << v[2] - @compounds << v[3] - end - end - - super(all_predicted_values, all_actual_values, all_confidence_values, feature_type, accept_values) - raise "illegal num compounds "+num_info if @compounds.size != @predicted_values.size - task.progress(100) if task # done with the mathmatics - end - - private - def regression_val(dataset, compound, feature) - v = value(dataset, compound, feature) - begin - v = v.to_f unless v==nil or v.is_a?(Numeric) - v - rescue - LOGGER.warn "no numeric value for regression: '"+v.to_s+"'" - nil - end - end - - def confidence_val(dataset, compound, confidence) - v = value(dataset, compound, confidence) - begin - v = v.to_f unless v==nil or v.is_a?(Numeric) - v - rescue - LOGGER.warn "no numeric value for confidence '"+v.to_s+"'" - nil - end - end - - def classification_val(dataset, compound, feature, accept_values) - v = value(dataset, compound, feature) - i = accept_values.index(v.to_s) - raise "illegal class_value of prediction (value is '"+v.to_s+"'), accept values are "+ - accept_values.inspect unless v==nil or i!=nil - i - end - - def value(dataset, compound, feature) - return nil if dataset.data_entries[compound]==nil - if feature==nil - v = dataset.data_entries[compound].values[0] - else - v = dataset.data_entries[compound][feature] - end - return nil if v==nil - raise "no array "+v.class.to_s+" : '"+v.to_s+"'" unless v.is_a?(Array) - if v.size>1 - v.uniq! 
- if v.size>1 - v = nil - LOGGER.warn "not yet implemented: multiple non-equal values "+compound.to_s+" "+v.inspect - else - v = v[0] - end - elsif v.size==1 - v = v[0] - else - v = nil - end - raise "array" if v.is_a?(Array) - v = nil if v.to_s.size==0 - v - end - - public - def compute_stats - + def compute_stats() res = {} - case @feature_type + case feature_type when "classification" (Validation::VAL_CLASS_PROPS).each{ |s| res[s] = send(s)} when "regression" diff --git a/lib/prediction_data.rb b/lib/prediction_data.rb new file mode 100644 index 0000000..154d11a --- /dev/null +++ b/lib/prediction_data.rb @@ -0,0 +1,287 @@ + +module Lib + + class PredictionData + + CHECK_VALUES = ENV['RACK_ENV'] =~ /debug|test/ + + def self.filter_data( data, compounds, min_confidence, min_num_predictions, max_num_predictions, prediction_index=nil ) + + raise OpenTox::BadRequestError.new "please specify either min_confidence or max_num_predictions" if + (min_confidence!=nil and max_num_predictions!=nil) || (min_confidence==nil and max_num_predictions==nil) + raise OpenTox::BadRequestError.new "min_num_predictions only valid for min_confidence" if + (min_confidence==nil and min_num_predictions!=nil) + min_num_predictions = 0 if min_num_predictions==nil + + LOGGER.debug("filtering predictions, conf:'"+min_confidence.to_s+"' min_num_predictions: '"+ + min_num_predictions.to_s+"' max_num_predictions: '"+max_num_predictions.to_s+"' ") + + orig_size = data[:predicted_values].size + valid_indices = [] + data[:confidence_values].size.times do |i| + next if prediction_index!=nil and prediction_index!=data[:predicted_values][i] + valid = false + if min_confidence!=nil + valid = (valid_indices.size<=min_num_predictions or data[:confidence_values][i]>=min_confidence) + else + valid = valid_indices.size0 + + if feature_type=="classification" + av = test_target_dataset.accept_values(prediction_feature) + raise "'"+OT.acceptValue.to_s+"' missing/invalid for feature '"+prediction_feature.to_s+"' in dataset '"+ + test_target_dataset_uri.to_s+"', acceptValues are: '"+av.inspect+"'" if av==nil or av.length<2 + if accept_values==nil + accept_values=av + else + raise "accept values (in folds) differ "+av.inspect+" != "+accept_values.inspect if av!=accept_values + end + end + + actual_values = [] + compounds.each do |c| + case feature_type + when "classification" + actual_values << classification_val(test_target_dataset, c, prediction_feature, accept_values) + when "regression" + actual_values << regression_val(test_target_dataset, c, prediction_feature) + end + end + task.progress( task_status += task_step ) if task # loaded actual values + + prediction_dataset = Lib::DatasetCache.find prediction_dataset_uri,subjectid + raise "prediction dataset not found: '"+prediction_dataset_uri.to_s+"'" unless prediction_dataset + + # allow missing prediction feature if there are no compounds in the prediction dataset + raise "predicted_variable not found in prediction_dataset\n"+ + "predicted_variable '"+predicted_variable.to_s+"'\n"+ + "prediction_dataset: '"+prediction_dataset_uri.to_s+"'\n"+ + "available features are: "+prediction_dataset.features.inspect if prediction_dataset.features.keys.index(predicted_variable)==nil and prediction_dataset.compounds.size>0 + raise "predicted_confidence not found in prediction_dataset\n"+ + "predicted_confidence '"+predicted_confidence.to_s+"'\n"+ + "prediction_dataset: '"+prediction_dataset_uri.to_s+"'\n"+ + "available features are: "+prediction_dataset.features.inspect if predicted_confidence 
and prediction_dataset.features.keys.index(predicted_confidence)==nil and prediction_dataset.compounds.size>0 + + raise "more predicted than test compounds, #test: "+compounds.size.to_s+" < #prediction: "+ + prediction_dataset.compounds.size.to_s+", test-dataset: "+test_dataset_uri.to_s+", prediction-dataset: "+ + prediction_dataset_uri if compounds.size < prediction_dataset.compounds.size + if CHECK_VALUES + prediction_dataset.compounds.each do |c| + raise "predicted compound not found in test dataset:\n"+c+"\ntest-compounds:\n"+ + compounds.collect{|c| c.to_s}.join("\n") if compounds.index(c)==nil + end + end + + predicted_values = [] + confidence_values = [] + count = 0 + compounds.each do |c| + if prediction_dataset.compounds.index(c)==nil + predicted_values << nil + confidence_values << nil + else + case feature_type + when "classification" + predicted_values << classification_val(prediction_dataset, c, predicted_variable, accept_values) + when "regression" + predicted_values << regression_val(prediction_dataset, c, predicted_variable) + end + if predicted_confidence + confidence_values << confidence_val(prediction_dataset, c, predicted_confidence) + else + confidence_values << nil + end + end + count += 1 + end + all_compounds += compounds + all_predicted_values += predicted_values + all_actual_values += actual_values + all_confidence_values += confidence_values + + task.progress( task_status += task_step ) if task # loaded predicted values and confidence + end + + #sort according to confidence if available + if all_confidence_values.compact.size>0 + values = [] + all_predicted_values.size.times do |i| + values << [all_predicted_values[i], all_actual_values[i], all_confidence_values[i], all_compounds[i]] + end + values = values.sort_by{ |v| v[2] || 0 }.reverse # sorting by confidence + all_predicted_values = [] + all_actual_values = [] + all_confidence_values = [] + all_compounds = [] + values.each do |v| + all_predicted_values << v[0] + all_actual_values << v[1] + all_confidence_values << v[2] + all_compounds << v[3] + end + end + + raise "illegal num compounds "+all_compounds.size.to_s+" != "+all_predicted_values.size.to_s if + all_compounds.size != all_predicted_values.size + task.progress(100) if task # done with the mathmatics + data = { :predicted_values => all_predicted_values, :actual_values => all_actual_values, :confidence_values => all_confidence_values, + :feature_type => feature_type, :accept_values => accept_values } + + PredictionData.new(data, all_compounds) + end + + private + def initialize( data, compounds ) + @data = data + @compounds = compounds + end + + private + def self.regression_val(dataset, compound, feature) + v = value(dataset, compound, feature) + begin + v = v.to_f unless v==nil or v.is_a?(Numeric) + v + rescue + LOGGER.warn "no numeric value for regression: '"+v.to_s+"'" + nil + end + end + + def self.confidence_val(dataset, compound, confidence) + v = value(dataset, compound, confidence) + begin + v = v.to_f unless v==nil or v.is_a?(Numeric) + v + rescue + LOGGER.warn "no numeric value for confidence '"+v.to_s+"'" + nil + end + end + + def self.classification_val(dataset, compound, feature, accept_values) + v = value(dataset, compound, feature) + i = accept_values.index(v.to_s) + raise "illegal class_value of prediction (value is '"+v.to_s+"'), accept values are "+ + accept_values.inspect unless v==nil or i!=nil + i + end + + def self.value(dataset, compound, feature) + return nil if dataset.data_entries[compound]==nil + if feature==nil + v = 
dataset.data_entries[compound].values[0] + else + v = dataset.data_entries[compound][feature] + end + return nil if v==nil + raise "no array "+v.class.to_s+" : '"+v.to_s+"'" unless v.is_a?(Array) + if v.size>1 + v.uniq! + if v.size>1 + v = nil + LOGGER.warn "not yet implemented: multiple non-equal values "+compound.to_s+" "+v.inspect + else + v = v[0] + end + elsif v.size==1 + v = v[0] + else + v = nil + end + raise "array" if v.is_a?(Array) + v = nil if v.to_s.size==0 + v + end + end +end \ No newline at end of file diff --git a/lib/predictions.rb b/lib/predictions.rb index bd32efb..233267d 100755 --- a/lib/predictions.rb +++ b/lib/predictions.rb @@ -1,4 +1,6 @@ +require "lib/prediction_data.rb" + module Lib module Util @@ -19,36 +21,11 @@ module Lib return instance_index.to_s end - def data - { :predicted_values => @predicted_values, :actual_values => @actual_values, :confidence_values => @confidence_values, - :feature_type => @feature_type, :accept_values => @accept_values } - end - - def self.from_data( data, min_confidence=nil, prediction_index=nil ) - if min_confidence!=nil - valid_indices = [] - data[:confidence_values].size.times do |i| - valid_indices << i if prediction_index==data[:predicted_values][i] and - (valid_indices.size<=12 or data[:confidence_values][i]>=min_confidence) - end - [ :predicted_values, :actual_values, :confidence_values ].each do |key| - arr = [] - valid_indices.each{|i| arr << data[key][i]} - data[key] = arr - end - end - Predictions.new( data[:predicted_values], data[:actual_values], data[:confidence_values], - data[:feature_type], data[:accept_values] ) - end - - def initialize( predicted_values, - actual_values, - confidence_values, - feature_type, - accept_values=nil ) + def initialize( data ) + raise unless data.is_a?(Hash) - @feature_type = feature_type - @accept_values = accept_values + @feature_type = data[:feature_type] + @accept_values = data[:accept_values] @num_classes = 1 #puts "predicted: "+predicted_values.inspect @@ -57,11 +34,11 @@ module Lib raise "unknown feature_type: '"+@feature_type.to_s+"'" unless @feature_type=="classification" || @feature_type=="regression" - raise "no predictions" if predicted_values.size == 0 - num_info = "predicted:"+predicted_values.size.to_s+ - " confidence:"+confidence_values.size.to_s+" actual:"+actual_values.size.to_s - raise "illegal num actual values "+num_info if actual_values.size != predicted_values.size - raise "illegal num confidence values "+num_info if confidence_values.size != predicted_values.size + raise "no predictions" if data[:predicted_values].size == 0 + num_info = "predicted:"+data[:predicted_values].size.to_s+ + " confidence:"+data[:confidence_values].size.to_s+" actual:"+data[:actual_values].size.to_s + raise "illegal num actual values "+num_info if data[:actual_values].size != data[:predicted_values].size + raise "illegal num confidence values "+num_info if data[:confidence_values].size != data[:predicted_values].size case @feature_type when "classification" @@ -76,8 +53,8 @@ module Lib @actual_values = [] @confidence_values = [] init_stats() - (0..predicted_values.size-1).each do |i| - update_stats( predicted_values[i], actual_values[i], confidence_values[i] ) + (0..data[:predicted_values].size-1).each do |i| + update_stats( data[:predicted_values][i], data[:actual_values][i], data[:confidence_values][i] ) end end diff --git a/lib/validation_db.rb b/lib/validation_db.rb index f770dc2..c3a3f71 100755 --- a/lib/validation_db.rb +++ b/lib/validation_db.rb @@ -72,7 +72,7 @@ module 
Validation attribute :classification_statistics_yaml attribute :regression_statistics_yaml attribute :finished - attribute :prediction_data + attribute :prediction_data_yaml index :model_uri index :validation_type @@ -100,6 +100,14 @@ module Validation def regression_statistics=(rs) self.regression_statistics_yaml = rs.to_yaml end + + def prediction_data + YAML.load(self.prediction_data_yaml) if self.prediction_data_yaml + end + + def prediction_data=(pd) + self.prediction_data_yaml = pd.to_yaml + end def save super diff --git a/report/plot_factory.rb b/report/plot_factory.rb index 6083d26..2d7946f 100644 --- a/report/plot_factory.rb +++ b/report/plot_factory.rb @@ -338,7 +338,6 @@ module Reports accept_values = validation_set.unique_feature_type=="classification" ? validation_set.get_accept_values : nil if (validation_set.size > 1) - names = []; performance = []; confidence = []; faint = [] sum_confidence_values = { :predicted_values => [], :actual_values => [], :confidence_values => []} @@ -378,19 +377,107 @@ module Reports end def self.demo_roc_plot -# roc_values = {:confidence_values => [0.1, 0.9, 0.5, 0.6, 0.6, 0.6], -# :predicted_values => [1, 0, 0, 1, 0, 1], -# :actual_values => [0, 1, 0, 0, 1, 1]} - roc_values = {:confidence_values => [0.9, 0.8, 0.7, 0.6, 0.5, 0.4], - :true_positives => [1, 1, 1, 0, 1, 0]} - tp_fp_rates = get_tp_fp_rates(roc_values) - labels = [] - tp_fp_rates[:youden].each do |point,confidence| - labels << ["confidence: "+confidence.to_s, point[0], point[1]] - end - + + seed = 831 #rand(1000) + puts seed + srand seed + plot_data = [] - plot_data << RubyPlot::LinePlotData.new(:name => "testname", :x_values => tp_fp_rates[:fp_rate], :y_values => tp_fp_rates[:tp_rate], :labels => labels) + n = 250 + a_cutoff = 0.5 + + a_real = [] + a_class = [] + n.times do |i| + a_real << rand + a_class << ( a_real[-1]>a_cutoff ? "a" : "b") + end + + puts a_real.to_csv + puts a_class.to_csv + + p_props = [[],[]] + p_classes = [] + + 2.times do |index| + + if (index==0) + p_noise = 0.15 + p_cutoff = 0.8 + else + p_noise = 0.5 + p_cutoff = 0.5 + end + + p_real = [] + p_class = [] + p_prop = [] + correct = [] + n.times do |i| + if rand<0.04 + p_real << rand + else + p_real << (a_real[i] + ((rand * p_noise) * (rand<0.5 ? 1 : -1))) + end + p_prop << ((p_cutoff-p_real[i]).abs) + p_class << ( p_real[-1]>p_cutoff ? "a" : "b") + correct << ((p_class[i]==a_class[i]) ? 
1 : 0) + end + + puts "" + puts p_real.to_csv + puts p_class.to_csv + puts p_prop.to_csv + + p_prop_max = p_prop.max + p_prop_min = p_prop.min + p_prop_delta = p_prop_max - p_prop_min + n.times do |i| + p_prop[i] = (p_prop[i] - p_prop_min)/p_prop_delta.to_f + p_props[index][i] = p_prop[i] + end + + puts p_prop.to_csv + + p_classes << p_class + + (0..n-2).each do |i| + (i+1..n-1).each do |j| + if p_prop[i] p_prop, + :true_positives => correct} + tp_fp_rates = get_tp_fp_rates(roc_values) + labels = [] + tp_fp_rates[:youden].each do |point,confidence| + labels << ["confidence: "+confidence.to_s, point[0], point[1]] + end + + plot_data << RubyPlot::LinePlotData.new(:name => "alg"+index.to_s, + :x_values => tp_fp_rates[:fp_rate], + :y_values => tp_fp_rates[:tp_rate]) + #,:labels => labels) + end + + puts "instance,class,prediction_1,propability_1,prediction_2,propability_2" + n.times do |i| + puts (i+1).to_s+","+a_class[i].to_s+","+p_classes[0][i].to_s+ + ","+p_props[0][i].to_s+ + ","+p_classes[1][i].to_s+","+p_props[1][i].to_s + end RubyPlot::plot_lines("/tmp/plot.png", "ROC-Plot", "False positive rate", @@ -424,7 +511,9 @@ module Reports conf.pop end if (predictions == nil) - predictions = Lib::Predictions.new([p[i]],[a[i]],[c[i]],feature_type, accept_values) + data = {:predicted_values => [p[i]],:actual_values => [a[i]], :confidence_values => [c[i]], + :feature_type => feature_type, :accept_values => accept_values} + predictions = Lib::Predictions.new(data) else predictions.update_stats(p[i], a[i], c[i]) end @@ -528,7 +617,20 @@ end #require "rubygems" #require "ruby-plot" -##Reports::PlotFactory::demo_ranking_plot +###Reports::PlotFactory::demo_ranking_plot +#class Array +# def sum +# inject( nil ) { |sum,x| sum ? sum+x : x } +# end +# +# def to_csv +# s = "" +# each do |x| +# s += (x.is_a?(Float) ? ("%.3f"%x) : (" "+x.to_s) )+", " +# end +# s +# end +#end #Reports::PlotFactory::demo_roc_plot #a = [1, 0, 1, 2, 3, 0, 2] diff --git a/report/report_content.rb b/report/report_content.rb index 61db340..3d92b52 100755 --- a/report/report_content.rb +++ b/report/report_content.rb @@ -22,6 +22,12 @@ class Reports::ReportContent @current_section = @xml_report.get_root_element end + def add_warning(warning) + sec = @xml_report.add_section(@current_section, "Warning") + @xml_report.add_paragraph(sec, warning) + end_section() + end + def add_paired_ttest_tables( validation_set, group_attribute, test_attributes, diff --git a/report/report_factory.rb b/report/report_factory.rb index 484cf12..2b978c5 100755 --- a/report/report_factory.rb +++ b/report/report_factory.rb @@ -63,14 +63,26 @@ module Reports::ReportFactory end end - def self.create_report_validation(validation_set, task=nil) + def self.add_filter_warning(report, filter_params) + msg = "The validation results for this report have been filtered." 
+ msg += " Minimum confidence: "+ filter_params[:min_confidence].to_s if + filter_params[:min_confidence]!=nil + msg += " Minimum number of predictions (sorted with confidence): "+ filter_params[:min_num_predictions].to_s if + filter_params[:min_num_predictions]!=nil + msg += " Maximum number of predictions: "+ filter_params[:max_num_predictions].to_s if + filter_params[:max_num_predictions]!=nil + report.add_warning(msg) + end + + def self.create_report_validation(validation_set, params, task=nil) raise OpenTox::BadRequestError.new("num validations is not equal to 1") unless validation_set.size==1 val = validation_set.validations[0] pre_load_predictions( validation_set, OpenTox::SubTask.create(task,0,80) ) report = Reports::ReportContent.new("Validation report") - + add_filter_warning(report, validation_set.filter_params) if validation_set.filter_params!=nil + case val.feature_type when "classification" report.add_result(validation_set, [:validation_uri] + VAL_ATTR_TRAIN_TEST + VAL_ATTR_CLASS, "Results", "Results") @@ -109,8 +121,9 @@ module Reports::ReportFactory report end - def self.create_report_crossvalidation(validation_set, task=nil) + def self.create_report_crossvalidation(validation_set, params, task=nil) + raise OpenTox::BadRequestError.new "cv report not implemented for filter params" if validation_set.filter_params!=nil raise OpenTox::BadRequestError.new("num validations is not >1") unless validation_set.size>1 raise OpenTox::BadRequestError.new("crossvalidation-id not unique and != nil: "+ validation_set.get_values(:crossvalidation_id,false).inspect) if validation_set.unique_value(:crossvalidation_id)==nil @@ -119,7 +132,7 @@ module Reports::ReportFactory validation_set.unique_value(:num_folds).to_s+")") unless validation_set.unique_value(:num_folds).to_i==validation_set.size raise OpenTox::BadRequestError.new("num different folds is not equal to num validations") unless validation_set.num_different_values(:crossvalidation_fold)==validation_set.size raise OpenTox::BadRequestError.new("validations must have unique feature type, i.e. must be either all regression, "+ - "or all classification validations") unless validation_set.unique_feature_type + "or all classification validations") unless validation_set.unique_feature_type pre_load_predictions( validation_set, OpenTox::SubTask.create(task,0,80) ) validation_set.validations.sort! 
do |x,y| x.crossvalidation_fold.to_f <=> y.crossvalidation_fold.to_f @@ -138,13 +151,12 @@ module Reports::ReportFactory report.add_confusion_matrix(cv_set.validations[0]) report.add_section("Plots") [nil, :crossvalidation_fold].each do |split_attribute| - if (validation_set.get_accept_values.size == 2) if validation_set.get_true_accept_value!=nil report.add_roc_plot(validation_set, validation_set.get_true_accept_value,split_attribute) else - report.add_roc_plot(validation_set, validation_set.get_accept_values[0],split_attribute) - report.add_roc_plot(validation_set, validation_set.get_accept_values[1],split_attribute) + report.add_roc_plot(validation_set, validation_set.get_accept_values[0], split_attribute) + report.add_roc_plot(validation_set, validation_set.get_accept_values[1], split_attribute) report.align_last_two_images "ROC Plots" end end @@ -156,7 +168,8 @@ module Reports::ReportFactory end end report.end_section - report.add_result(validation_set, [:validation_uri, :validation_report_uri]+VAL_ATTR_CV+VAL_ATTR_CLASS-[:num_folds, :dataset_uri, :algorithm_uri], + report.add_result(validation_set, + [:validation_uri, :validation_report_uri]+VAL_ATTR_CV+VAL_ATTR_CLASS-[:num_folds, :dataset_uri, :algorithm_uri], "Results","Results") when "regression" report.add_result(cv_set, [:crossvalidation_uri]+VAL_ATTR_CV+VAL_ATTR_REGR-[:crossvalidation_fold],res_titel, res_titel, res_text) @@ -169,7 +182,9 @@ module Reports::ReportFactory report.add_confidence_plot(validation_set, :r_square, nil, :crossvalidation_fold) report.align_last_two_images "Confidence Plots Across Folds" report.end_section - report.add_result(validation_set, [:validation_uri, :validation_report_uri]+VAL_ATTR_CV+VAL_ATTR_REGR-[:num_folds, :dataset_uri, :algorithm_uri], "Results","Results") + report.add_result(validation_set, + [:validation_uri, :validation_report_uri]+VAL_ATTR_CV+VAL_ATTR_REGR-[:num_folds, :dataset_uri, :algorithm_uri], + "Results","Results") end task.progress(90) if task @@ -219,6 +234,7 @@ module Reports::ReportFactory pre_load_predictions( validation_set, OpenTox::SubTask.create(task,0,80) ) report = Reports::ReportContent.new("Algorithm comparison report") + add_filter_warning(report, validation_set.filter_params) if validation_set.filter_params!=nil if (validation_set.num_different_values(:dataset_uri)>1) all_merged = validation_set.merge([:algorithm_uri, :dataset_uri, :crossvalidation_id, :crossvalidation_uri]) diff --git a/report/report_service.rb b/report/report_service.rb index f299122..53a17ab 100644 --- a/report/report_service.rb +++ b/report/report_service.rb @@ -72,7 +72,15 @@ module Reports LOGGER.debug "identifier: '"+identifier.inspect+"'" raise "illegal num identifiers: "+identifier.size.to_s+" should be equal to num validation-uris ("+validation_uris.size.to_s+")" if identifier and identifier.size!=validation_uris.size - validation_set = Reports::ValidationSet.new(validation_uris, identifier, subjectid) + + filter_params = nil + [:min_confidence, :min_num_predictions, :max_num_predictions].each do |key| + if params[key] != nil + filter_params = {} unless filter_params + filter_params[key] = params[key].to_f + end + end + validation_set = Reports::ValidationSet.new(validation_uris, identifier, filter_params, subjectid) raise OpenTox::BadRequestError.new("cannot get validations from validation_uris '"+validation_uris.inspect+"'") unless validation_set and validation_set.size > 0 LOGGER.debug "loaded "+validation_set.size.to_s+" validation/s" task.progress(10) if task diff --git 
a/report/statistical_test.rb b/report/statistical_test.rb index 8d6bd62..4d85555 100644 --- a/report/statistical_test.rb +++ b/report/statistical_test.rb @@ -69,8 +69,8 @@ module Reports def self.paired_ttest( validations1, validations2, attribute, class_value, significance_level=0.95 ) - array1 = validations1.collect{ |v| (v.send(attribute).is_a?(Hash) ? v.send(attribute)[class_value] : v.send(attribute)) } - array2 = validations2.collect{ |v| (v.send(attribute).is_a?(Hash) ? v.send(attribute)[class_value] : v.send(attribute)) } + array1 = validations1.collect{ |v| (v.send(attribute).is_a?(Hash) ? v.send(attribute)[class_value].to_f : v.send(attribute).to_f) } + array2 = validations2.collect{ |v| (v.send(attribute).is_a?(Hash) ? v.send(attribute)[class_value].to_f : v.send(attribute).to_f) } LOGGER.debug "paired-t-testing "+attribute.to_s+" "+array1.inspect+" vs "+array2.inspect LIB::StatisticalTest.pairedTTest(array1, array2, significance_level) end @@ -83,12 +83,16 @@ module Reports end -#t1 = Time.new -#10.times do -# puts LIB::StatisticalTest.pairedTTest([1,2,3,4,5,12,4,2],[2,3,3,3,56,3,4,5]) -#end -#LIB::StatisticalTest.quitR -#t2 = Time.new -#puts t2-t1 +#x=["1.36840891838074", "2.89500403404236", "2.58440494537354", "1.96544003486633", "1.4017288684845", "1.68250012397766", "1.65089893341064", "2.24862003326416", "3.73909902572632", "2.36335206031799"] +#y=["1.9675121307373", "2.30981087684631", "2.59359288215637", "2.62243509292603", "1.98700189590454", "2.26789593696594", "2.03917217254639", "2.69466996192932", "1.96487307548523", "1.65820598602295"] +#puts LIB::StatisticalTest.pairedTTest(x,y) +# +##t1 = Time.new +##10.times do +# puts LIB::StatisticalTest.pairedTTest([1.01,2,3,4,5,12,4,2],[2,3,3,3,56,3,4,5]) +##end +#LIB::StatisticalTest.quit_r +##t2 = Time.new +##puts t2-t1 diff --git a/report/validation_access.rb b/report/validation_access.rb index 3b5335c..536923d 100755 --- a/report/validation_access.rb +++ b/report/validation_access.rb @@ -13,7 +13,7 @@ class Reports::ValidationDB self_uri.host == val_uri.host && self_uri.port == val_uri.port end - def resolve_cv_uris(validation_uris, identifier=nil, subjectid=nil) + def resolve_cv_uris(validation_uris, identifier, subjectid) res = {} count = 0 validation_uris.each do |u| @@ -47,8 +47,8 @@ class Reports::ValidationDB res end - def init_validation(validation, uri, subjectid=nil) - + def init_validation(validation, uri, filter_params, subjectid) + raise OpenTox::BadRequestError.new "not a validation uri: "+uri.to_s unless uri =~ /\/[0-9]+$/ validation_id = uri.split("/")[-1] raise OpenTox::BadRequestError.new "invalid validation id "+validation_id.to_s unless validation_id!=nil and @@ -63,6 +63,9 @@ class Reports::ValidationDB else v = YAML::load(OpenTox::RestClientWrapper.get uri, {:subjectid=>subjectid, :accept=>"application/serialize"}) end + v.filter_predictions(filter_params[:min_confidence], filter_params[:min_num_predictions], filter_params[:max_num_predictions]) if + filter_params + raise OpenTox::NotFoundError.new "validation with id "+validation_id.to_s+" not found" unless v raise OpenTox::BadRequestError.new "validation with id "+validation_id.to_s+" is not finished yet" unless v.finished (Validation::VAL_PROPS + Validation::VAL_CV_PROPS).each do |p| @@ -80,7 +83,7 @@ class Reports::ValidationDB end end - def init_validation_from_cv_statistics( validation, cv_uri, subjectid=nil ) + def init_validation_from_cv_statistics( validation, cv_uri, filter_params, subjectid ) raise OpenTox::BadRequestError.new "not a 
crossvalidation uri: "+cv_uri.to_s unless cv_uri.uri? and cv_uri =~ /crossvalidation.*\/[0-9]+$/ @@ -96,6 +99,9 @@ class Reports::ValidationDB cv = YAML::load(OpenTox::RestClientWrapper.get cv_uri, {:subjectid=>subjectid, :accept=>"application/serialize"}) v = YAML::load(OpenTox::RestClientWrapper.get cv_uri+"/statistics", {:subjectid=>subjectid, :accept=>"application/serialize"}) end + v.filter_predictions(filter_params[:min_confidence], filter_params[:min_num_predictions], filter_params[:max_num_predictions]) if + filter_params + (Validation::VAL_PROPS + Validation::VAL_CV_PROPS).each do |p| validation.send("#{p.to_s}=".to_sym, v.send(p)) end @@ -126,11 +132,14 @@ class Reports::ValidationDB end end - def get_predictions(validation, subjectid=nil, task=nil) - - Lib::OTPredictions.new( validation.feature_type, validation.test_dataset_uri, + def get_predictions(validation, filter_params, subjectid, task) + # we need compound info, cannot reuse stored prediction data + data = Lib::PredictionData.create( validation.feature_type, validation.test_dataset_uri, validation.test_target_dataset_uri, validation.prediction_feature, validation.prediction_dataset_uri, - validation.predicted_variable, validation.predicted_confidence, subjectid, task) + validation.predicted_variable, validation.predicted_confidence, subjectid, task ) + data = Lib::PredictionData.filter_data( data.data, data.compounds, + filter_params[:min_confidence], filter_params[:min_num_predictions], filter_params[:max_num_predictions] ) if filter_params!=nil + Lib::OTPredictions.new( data.data, data.compounds ) end def get_accept_values( validation, subjectid=nil ) diff --git a/report/validation_data.rb b/report/validation_data.rb index 61761ab..e91348d 100755 --- a/report/validation_data.rb +++ b/report/validation_data.rb @@ -86,18 +86,20 @@ module Reports VAL_ATTR_RANKING.collect{ |a| (a.to_s+"_ranking").to_sym } @@validation_attributes.each{ |a| attr_accessor a } - attr_reader :predictions, :subjectid + attr_reader :predictions, :subjectid, :filter_params attr_accessor :identifier, :validation_report_uri, :crossvalidation_report_uri - def initialize(uri = nil, subjectid = nil) - Reports.validation_access.init_validation(self, uri, subjectid) if uri + def initialize(uri = nil, filter_params=nil, subjectid = nil) + Reports.validation_access.init_validation(self, uri, filter_params, subjectid) if uri @subjectid = subjectid + raise unless filter_params==nil || filter_params.is_a?(Hash) + @filter_params = filter_params #raise "subjectid is nil" unless subjectid end - def self.from_cv_statistics( cv_uri, subjectid = nil ) - v = ReportValidation.new(nil, subjectid) - Reports.validation_access.init_validation_from_cv_statistics(v, cv_uri, subjectid) + def self.from_cv_statistics( cv_uri, filter_params, subjectid ) + v = ReportValidation.new(nil, filter_params, subjectid) + Reports.validation_access.init_validation_from_cv_statistics(v, cv_uri, filter_params, subjectid) v end @@ -116,7 +118,7 @@ module Reports task.progress(100) if task nil else - @predictions = Reports.validation_access.get_predictions( self, @subjectid, task ) + @predictions = Reports.validation_access.get_predictions( self, @filter_params, @subjectid, task ) end end end @@ -167,13 +169,13 @@ module Reports # class ValidationSet - def initialize(validation_uris=nil, identifier=nil, subjectid=nil) + def initialize(validation_uris=nil, identifier=nil, filter_params=nil, subjectid=nil) @unique_values = {} @validations = [] if validation_uris validation_uri_and_ids = 
ReportValidation.resolve_cv_uris(validation_uris, identifier, subjectid) validation_uri_and_ids.each do |u,id| - v = ReportValidation.new(u, subjectid) + v = ReportValidation.new(u, filter_params, subjectid) v.identifier = id if id ids = Reports.persistance.list_reports("validation",{:validation_uris=>v.validation_uri }) v.validation_report_uri = ReportService.instance.get_uri("validation",ids[-1]) if ids and ids.size>0 @@ -228,6 +230,10 @@ module Reports return false end + def filter_params + @validations.first.filter_params + end + # loads the attributes of the related crossvalidation into all validation objects # def load_cv_attributes @@ -424,7 +430,7 @@ module Reports new_set = ValidationSet.new grouping = Util.group(@validations, [:crossvalidation_id]) grouping.each do |g| - v = ReportValidation.from_cv_statistics(g[0].crossvalidation_uri, g[0].subjectid) + v = ReportValidation.from_cv_statistics(g[0].crossvalidation_uri, @validations.first.filter_params, g[0].subjectid) v.identifier = g.collect{|vv| vv.identifier}.uniq.join(";") new_set.validations << v end diff --git a/test/test_examples_util.rb b/test/test_examples_util.rb index a5f2867..b48096d 100755 --- a/test/test_examples_util.rb +++ b/test/test_examples_util.rb @@ -299,7 +299,8 @@ module ValidationExamples def report( waiting_task=nil ) #begin - @report_uri = Util.validation_post '/report/'+report_type,{:validation_uris => @validation_uri},@subjectid,waiting_task if @validation_uri + @report_uri = Util.validation_post '/report/'+report_type,{:validation_uris => @validation_uri}, + @subjectid,waiting_task if @validation_uri Util.validation_get "/report/"+report_uri.split("/")[-2]+"/"+report_uri.split("/")[-1], @subjectid if @report_uri #rescue => ex #puts "could not create report: "+ex.message diff --git a/validation/validation_application.rb b/validation/validation_application.rb index 0647b10..f126679 100755 --- a/validation/validation_application.rb +++ b/validation/validation_application.rb @@ -226,34 +226,34 @@ end # Validation::Validation.find( :all, :conditions => { :crossvalidation_id => params[:id] } ).collect{ |v| v.validation_uri.to_s }.join("\n")+"\n" #end -get '/crossvalidation/:id/predictions' do - LOGGER.info "get predictions for crossvalidation with id "+params[:id].to_s - begin - #crossvalidation = Validation::Crossvalidation.find(params[:id]) - crossvalidation = Validation::Crossvalidation.get(params[:id]) - rescue ActiveRecord::RecordNotFound => ex - raise OpenTox::NotFoundError.new "Crossvalidation '#{params[:id]}' not found." - end - raise OpenTox::BadRequestError.new "Crossvalidation '"+params[:id].to_s+"' not finished" unless crossvalidation.finished - - content_type "application/x-yaml" - validations = Validation::Validation.find( :crossvalidation_id => params[:id], :validation_type => "crossvalidation" ) - p = Lib::OTPredictions.to_array( validations.collect{ |v| v.compute_validation_stats_with_model(nil, true) } ).to_yaml - - case request.env['HTTP_ACCEPT'].to_s - when /text\/html/ - content_type "text/html" - description = - "The crossvalidation predictions as (yaml-)array." 
- related_links = - "All crossvalidations: "+url_for("/crossvalidation",:full)+"\n"+ - "Correspoding crossvalidation: "+url_for("/crossvalidation/"+params[:id],:full) - OpenTox.text_to_html p,@subjectid, related_links, description - else - content_type "text/x-yaml" - p - end -end +#get '/crossvalidation/:id/predictions' do +# LOGGER.info "get predictions for crossvalidation with id "+params[:id].to_s +# begin +# #crossvalidation = Validation::Crossvalidation.find(params[:id]) +# crossvalidation = Validation::Crossvalidation.get(params[:id]) +# rescue ActiveRecord::RecordNotFound => ex +# raise OpenTox::NotFoundError.new "Crossvalidation '#{params[:id]}' not found." +# end +# raise OpenTox::BadRequestError.new "Crossvalidation '"+params[:id].to_s+"' not finished" unless crossvalidation.finished +# +# content_type "application/x-yaml" +# validations = Validation::Validation.find( :crossvalidation_id => params[:id], :validation_type => "crossvalidation" ) +# p = Lib::OTPredictions.to_array( validations.collect{ |v| v.compute_validation_stats_with_model(nil, true) } ).to_yaml +# +# case request.env['HTTP_ACCEPT'].to_s +# when /text\/html/ +# content_type "text/html" +# description = +# "The crossvalidation predictions as (yaml-)array." +# related_links = +# "All crossvalidations: "+url_for("/crossvalidation",:full)+"\n"+ +# "Correspoding crossvalidation: "+url_for("/crossvalidation/"+params[:id],:full) +# OpenTox.text_to_html p,@subjectid, related_links, description +# else +# content_type "text/x-yaml" +# p +# end +#end get '/?' do @@ -595,30 +595,30 @@ get '/:id/probabilities' do end -get '/:id/predictions' do - LOGGER.info "get validation predictions "+params.inspect - begin - #validation = Validation::Validation.find(params[:id]) - validation = Validation::Validation.get(params[:id]) - rescue ActiveRecord::RecordNotFound => ex - raise OpenTox::NotFoundError.new "Validation '#{params[:id]}' not found." - end - raise OpenTox::BadRequestError.new "Validation '"+params[:id].to_s+"' not finished" unless validation.finished - p = validation.compute_validation_stats_with_model(nil, true) - case request.env['HTTP_ACCEPT'].to_s - when /text\/html/ - content_type "text/html" - description = - "The validation predictions as (yaml-)array." - related_links = - "All validations: "+url_for("/",:full)+"\n"+ - "Correspoding validation: "+url_for("/"+params[:id],:full) - OpenTox.text_to_html p.to_array.to_yaml,@subjectid, related_links, description - else - content_type "text/x-yaml" - p.to_array.to_yaml - end -end +#get '/:id/predictions' do +# LOGGER.info "get validation predictions "+params.inspect +# begin +# #validation = Validation::Validation.find(params[:id]) +# validation = Validation::Validation.get(params[:id]) +# rescue ActiveRecord::RecordNotFound => ex +# raise OpenTox::NotFoundError.new "Validation '#{params[:id]}' not found." +# end +# raise OpenTox::BadRequestError.new "Validation '"+params[:id].to_s+"' not finished" unless validation.finished +# p = validation.compute_validation_stats_with_model(nil, true) +# case request.env['HTTP_ACCEPT'].to_s +# when /text\/html/ +# content_type "text/html" +# description = +# "The validation predictions as (yaml-)array." 
+# related_links = +# "All validations: "+url_for("/",:full)+"\n"+ +# "Correspoding validation: "+url_for("/"+params[:id],:full) +# OpenTox.text_to_html p.to_array.to_yaml,@subjectid, related_links, description +# else +# content_type "text/x-yaml" +# p.to_array.to_yaml +# end +#end #get '/:id/:attribute' do # LOGGER.info "access validation attribute "+params.inspect diff --git a/validation/validation_service.rb b/validation/validation_service.rb index 2b8a18f..7f853ca 100755 --- a/validation/validation_service.rb +++ b/validation/validation_service.rb @@ -38,32 +38,12 @@ module Validation crossvalidation = Crossvalidation.get(cv_id) raise OpenTox::NotFoundError.new "Crossvalidation '#{cv_id}' not found." unless crossvalidation raise OpenTox::BadRequestError.new "Crossvalidation '"+cv_id.to_s+"' not finished" unless crossvalidation.finished - vals = Validation.find( :crossvalidation_id => cv_id, :validation_type => "crossvalidation" ).collect{|x| x} - models = vals.collect{|v| OpenTox::Model::Generic.find(v.model_uri, subjectid)} - feature_type = models.first.feature_type(subjectid) - test_dataset_uris = vals.collect{|v| v.test_dataset_uri} - test_target_dataset_uris = vals.collect{|v| v.test_target_dataset_uri} - prediction_feature = vals.first.prediction_feature - prediction_dataset_uris = vals.collect{|v| v.prediction_dataset_uri} - predicted_variables = models.collect{|m| m.predicted_variable(subjectid)} - predicted_confidences = models.collect{|m| m.predicted_confidence(subjectid)} - prediction = Lib::OTPredictions.new( feature_type, test_dataset_uris, test_target_dataset_uris, prediction_feature, - prediction_dataset_uris, predicted_variables, predicted_confidences, subjectid, OpenTox::SubTask.create(waiting_task, 0, 90) ) - + v = Validation.new - case feature_type - when "classification" - v.classification_statistics = prediction.compute_stats - when "regression" - v.regression_statistics = prediction.compute_stats - end - v.update :num_instances => prediction.num_instances, - :num_without_class => prediction.num_without_class, - :percent_without_class => prediction.percent_without_class, - :num_unpredicted => prediction.num_unpredicted, - :percent_unpredicted => prediction.percent_unpredicted, - :finished => true + v.compute_prediction_data_with_cv(vals, waiting_task) + v.compute_validation_stats() + (VAL_PROPS_GENERAL-[:validation_uri]).each do |p| v.send("#{p.to_s}=".to_sym, vals.collect{ |vv| vv.send(p) }.uniq.join(";")) end @@ -72,7 +52,6 @@ module Validation v.crossvalidation_id = crossvalidation.id v.crossvalidation_fold = vals.collect{ |vv| vv.crossvalidation_fold }.uniq.join(";") v.real_runtime = vals.collect{ |vv| vv.real_runtime }.uniq.join(";") - v.prediction_data = prediction.data.to_yaml v.save end waiting_task.progress(100) if waiting_task @@ -200,13 +179,26 @@ module Validation self.prediction_dataset_uri = prediction_dataset_uri self.real_runtime = benchmark.real - compute_validation_stats_with_model( model, false, OpenTox::SubTask.create(task, 50, 100) ) + compute_prediction_data_with_model( model, OpenTox::SubTask.create(task, 50, 100) ) + compute_validation_stats() end - - def compute_validation_stats_with_model( model=nil, dry_run=false, task=nil ) - - #model = OpenTox::Model::PredictionModel.find(self.model_uri) if model==nil and self.model_uri - #raise OpenTox::NotFoundError.new "model not found: "+self.model_uri.to_s unless model + + def compute_prediction_data_with_cv(cv_vals, waiting_task=nil) + models = cv_vals.collect{|v| 
OpenTox::Model::Generic.find(v.model_uri, subjectid)} + feature_type = models.first.feature_type(subjectid) + test_dataset_uris = cv_vals.collect{|v| v.test_dataset_uri} + test_target_dataset_uris = cv_vals.collect{|v| v.test_target_dataset_uri} + prediction_feature = cv_vals.first.prediction_feature + prediction_dataset_uris = cv_vals.collect{|v| v.prediction_dataset_uri} + predicted_variables = models.collect{|m| m.predicted_variable(subjectid)} + predicted_confidences = models.collect{|m| m.predicted_confidence(subjectid)} + p_data = Lib::PredictionData.create( feature_type, test_dataset_uris, test_target_dataset_uris, prediction_feature, + prediction_dataset_uris, predicted_variables, predicted_confidences, subjectid, waiting_task ) + self.prediction_data = p_data.data + p_data.data + end + + def compute_prediction_data_with_model(model=nil, task=nil) model = OpenTox::Model::Generic.find(self.model_uri, self.subjectid) if model==nil and self.model_uri raise OpenTox::NotFoundError.new "model not found: "+self.model_uri.to_s unless model @@ -219,76 +211,82 @@ module Validation raise "cannot determine whether model '"+model.uri.to_s+"' performs classification or regression, "+ "please set rdf-type of predictedVariables feature '"+predicted_variable.to_s+ "' to NominalFeature or NumericFeature" if (feature_type.to_s!="classification" and feature_type.to_s!="regression") - compute_validation_stats( feature_type, predicted_variable, predicted_confidence, - prediction_feature, algorithm_uri, dry_run, task ) + compute_prediction_data( feature_type, predicted_variable, predicted_confidence, + prediction_feature, algorithm_uri, task ) end - - def compute_validation_stats( feature_type, predicted_variable, predicted_confidence, prediction_feature, - algorithm_uri, dry_run, task ) - -# self.attributes = { :prediction_feature => prediction_feature } if self.prediction_feature==nil && prediction_feature -# self.attributes = { :algorithm_uri => algorithm_uri } if self.algorithm_uri==nil && algorithm_uri -# self.save! 
-# self.update :prediction_feature => prediction_feature if self.prediction_feature==nil && prediction_feature -# self.update :algorithm_uri => algorithm_uri if self.algorithm_uri==nil && algorithm_uri + + def compute_prediction_data( feature_type, predicted_variable, predicted_confidence, prediction_feature, + algorithm_uri, task ) self.prediction_feature = prediction_feature if self.prediction_feature==nil && prediction_feature self.algorithm_uri = algorithm_uri if self.algorithm_uri==nil && algorithm_uri - + LOGGER.debug "computing prediction stats" - prediction = Lib::OTPredictions.new( feature_type, + p_data = Lib::PredictionData.create( feature_type, self.test_dataset_uri, self.test_target_dataset_uri, self.prediction_feature, self.prediction_dataset_uri, predicted_variable, predicted_confidence, self.subjectid, OpenTox::SubTask.create(task, 0, 80) ) - #reading datasets and computing the main stats is 80% the work - - unless dry_run - case feature_type - when "classification" - #self.attributes = { :classification_statistics => prediction.compute_stats } - #self.update :classification_statistics => prediction.compute_stats - self.classification_statistics = prediction.compute_stats - when "regression" - #self.attributes = { :regression_statistics => prediction.compute_stats } - self.regression_statistics = prediction.compute_stats - end -# self.attributes = { :num_instances => prediction.num_instances, -# :num_without_class => prediction.num_without_class, -# :percent_without_class => prediction.percent_without_class, -# :num_unpredicted => prediction.num_unpredicted, -# :percent_unpredicted => prediction.percent_unpredicted, -# :finished => true} -# self.save! - self.update :num_instances => prediction.num_instances, - :num_without_class => prediction.num_without_class, - :percent_without_class => prediction.percent_without_class, - :num_unpredicted => prediction.num_unpredicted, - :percent_unpredicted => prediction.percent_unpredicted, - :prediction_data => prediction.data.to_yaml, - :finished => true - raise unless self.valid? - end - + self.prediction_data = p_data.data task.progress(100) if task - prediction + p_data.data end + def compute_validation_stats( save_stats=true ) + p_data = self.prediction_data + raise "compute prediction data before" if p_data==nil + predictions = Lib::OTPredictions.new(p_data) + case p_data[:feature_type] + when "classification" + self.classification_statistics = predictions.compute_stats() + when "regression" + self.regression_statistics = predictions.compute_stats() + end + self.num_instances = predictions.num_instances + self.num_without_class = predictions.num_without_class + self.percent_without_class = predictions.percent_without_class + self.num_unpredicted = predictions.num_unpredicted + self.percent_unpredicted = predictions.percent_unpredicted + if (save_stats) + self.finished = true + self.save + raise unless self.valid? 
+ end + end - def probabilities( confidence, prediction ) - raise OpenTox::BadRequestError.new "Only supported for classification" if classification_statistics==nil - raise OpenTox::BadRequestError.new("illegal confidence value #{confidence}") if !confidence.is_a?(Numeric) or confidence<0 or confidence>1 + def filter_predictions( min_confidence, min_num_predictions, max_num_predictions, prediction=nil ) + self.prediction_data = nil + self.save - p_data = YAML.load(self.prediction_data.to_s) - raise OpenTox::BadRequestError.new("probabilities method works only for new validations - prediction data missing") unless p_data + raise OpenTox::BadRequestError.new "only supported for classification" if prediction!=nil and classification_statistics==nil + raise OpenTox::BadRequestError.new "illegal confidence value #{min_confidence}" unless + min_confidence==nil or (min_confidence.is_a?(Numeric) and min_confidence>=0 and min_confidence<=1) + p_data = self.prediction_data + if p_data==nil + # this is to ensure backwards compatibility + # may cause a timeout on the first run, as this is not meant to run in a task + if validation_type=="crossvalidation_statistics" + vals = Validation.find( :crossvalidation_id => self.crossvalidation_id, :validation_type => "crossvalidation" ).collect{|x| x} + compute_prediction_data_with_cv(vals) + else + compute_prediction_data_with_model + end + self.save + p_data = self.prediction_data + end raise OpenTox::BadRequestError.new("illegal prediction value: '"+prediction+"', available: "+ - p_data[:accept_values].inspect) if p_data[:accept_values].index(prediction)==nil - - p = Lib::Predictions.from_data(p_data, confidence, p_data[:accept_values].index(prediction)) - raise OpenTox::BadRequestError("no confidence values available") unless p.confidence_values_available? - + p_data[:accept_values].inspect) if prediction!=nil and p_data[:accept_values].index(prediction)==nil + p = Lib::PredictionData.filter_data(p_data, nil, min_confidence, min_num_predictions, max_num_predictions, + prediction==nil ? nil : p_data[:accept_values].index(prediction)) + self.prediction_data = p.data + compute_validation_stats(false) + end + + def probabilities( confidence, prediction ) + filter_predictions( confidence, 12, nil, prediction ) + p_data = self.prediction_data + p = Lib::Predictions.new(p_data) prediction_counts = p.confusion_matrix_row( p_data[:accept_values].index(prediction) ) sum = 0 prediction_counts.each{|v| sum+=v} - probs = {} p_data[:accept_values].size.times do |i| probs[p_data[:accept_values][i]] = prediction_counts[i]/sum.to_f -- cgit v1.2.3
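# ---------------------------------------------------------------------------
# Editor's sketch (not part of the commit above): how the pieces this patch
# introduces fit together. Lib::PredictionData.create / .filter_data,
# Lib::OTPredictions, and the filter parameters read by report_service.rb all
# come from the patch itself; every URI, value, and local variable below is
# an illustrative assumption. Assumes the validation service environment
# (load path, LOGGER, dataset cache) is already loaded.
# ---------------------------------------------------------------------------
require "lib/prediction_data.rb"
require "lib/ot_predictions.rb"

# placeholder inputs -- a finished validation provides the real values
feature_type            = "classification"
test_dataset_uri        = "http://example.org/dataset/1"          # assumption
test_target_dataset_uri = nil              # nil falls back to the test dataset
prediction_feature      = "http://example.org/feature/endpoint"   # assumption
prediction_dataset_uri  = "http://example.org/dataset/2"          # assumption
predicted_variable      = nil              # nil falls back to prediction_feature
predicted_confidence    = "http://example.org/feature/confidence" # assumption
subjectid               = nil

# load actual/predicted/confidence values; PredictionData.create sorts the
# predictions by descending confidence
p_data = Lib::PredictionData.create( feature_type, test_dataset_uri,
  test_target_dataset_uri, prediction_feature, prediction_dataset_uri,
  predicted_variable, predicted_confidence, subjectid, nil )

# keep the highest-confidence predictions: everything with confidence >= 0.8,
# but never fewer than 12 predictions. Exactly one of min_confidence /
# max_num_predictions may be given; min_num_predictions requires min_confidence.
filtered = Lib::PredictionData.filter_data( p_data.data, p_data.compounds,
  0.8,   # min_confidence
  12,    # min_num_predictions
  nil )  # max_num_predictions

# recompute the statistics on the filtered subset
stats = Lib::OTPredictions.new( filtered.data, filtered.compounds ).compute_stats
puts stats.inspect

# At the REST level report_service.rb picks the same filters up from the
# request parameters, e.g. (hypothetical host and URIs):
#   POST /report/validation
#     validation_uris=http://example.org/validation/1
#     min_confidence=0.8
#     min_num_predictions=12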