Diffstat (limited to 'report/plot_factory.rb')
-rw-r--r--  report/plot_factory.rb  344
1 file changed, 238 insertions(+), 106 deletions(-)
diff --git a/report/plot_factory.rb b/report/plot_factory.rb
index 2074ce5..f114dd3 100644
--- a/report/plot_factory.rb
+++ b/report/plot_factory.rb
@@ -2,6 +2,10 @@ ENV['JAVA_HOME'] = "/usr/bin" unless ENV['JAVA_HOME']
ENV['PATH'] = ENV['JAVA_HOME']+":"+ENV['PATH'] unless ENV['PATH'].split(":").index(ENV['JAVA_HOME'])
ENV['RANK_PLOTTER_JAR'] = "RankPlotter/RankPlotter.jar" unless ENV['RANK_PLOTTER_JAR']
+CONF_PLOT_RANGE = { :accuracy => [0.45,1.05], :true_positive_rate => [0.45,1.05],:true_negative_rate => [0.45,1.05],
+ :false_positive_rate => [0.45,1.05], :false_negative_rate => [0.45,1.05], :positive_predictive_value => [0.45,1.05],
+ :negative_predictive_value => [0.45,1.05], :r_square => [0, 1.05], :sample_correlation_coefficient => [0, 1.05] }
+
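
The new CONF_PLOT_RANGE constant pins the y-axis of confidence plots to a fixed range per performance measure, so plots of the same measure stay comparable across reports; measures without an entry (e.g. RMSE for regression) pass nil to the plotting call, which then presumably auto-scales. A minimal standalone sketch of that lookup, independent of the rest of the module:

  # Illustrative only: fixed y-axis ranges per performance measure.
  RANGES = { :accuracy => [0.45, 1.05], :r_square => [0, 1.05] }

  def y_range_for(performance_attribute)
    RANGES[performance_attribute]  # nil => let the plot auto-scale
  end

  p y_range_for(:accuracy)                 # => [0.45, 1.05]
  p y_range_for(:root_mean_squared_error)  # => nil
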
class Array
def swap!(i,j)
tmp = self[i]
@@ -47,7 +51,6 @@ class Array
end
end
-
module Reports
module PlotFactory
@@ -81,9 +84,11 @@ module Reports
y_i = valid_indices.collect{ |i| y_i[i] }
end
- names << ( name_attribute==:crossvalidation_fold ? "fold " : "" ) + v.send(name_attribute).to_s
- x << x_i
- y << y_i
+ if x_i.size>0
+ names << ( name_attribute==:crossvalidation_fold ? "fold " : "" ) + v.send(name_attribute).to_s
+ x << x_i
+ y << y_i
+ end
end
names = [""] if names.size==1
@@ -95,6 +100,34 @@ module Reports
omit_count
end
+ def self.create_train_test_plot( out_files, validation_set, only_prediction_feature, waiting_task )
+ if only_prediction_feature
+ train = []
+ test = []
+ validation_set.validations.each do |v|
+ [[v.test_dataset_uri, test, v.test_target_dataset_uri],
+ [v.training_dataset_uri, train, v.training_dataset_uri]].each do |uri,array,uri2|
+ d = Lib::DatasetCache.find(uri, validation_set.validations[0].subjectid)
+ d2 = Lib::DatasetCache.find((uri2 ? uri2 : uri), validation_set.validations[0].subjectid)
+ d.compounds.each do |c|
+ d2.data_entries[c][v.prediction_feature].each do |val|
+ array << val
+ end if d2.data_entries[c] and d2.data_entries[c][v.prediction_feature]
+ end
+ end
+ end
+ waiting_task.progress(50) if waiting_task
+
+ numerical = validation_set.unique_feature_type=="regression"
+ Reports::r_util.double_hist_plot(out_files, train, test, numerical, numerical, "Training Data", "Test Data",
+ "Prediction Feature Distribution", validation_set.validations.first.prediction_feature )
+ else
+ Reports::r_util.feature_value_plot(out_files, validation_set.validations[0].training_feature_dataset_uri,
+ validation_set.validations[0].test_feature_dataset_uri, "Training Data", "Test Data",
+ nil, true, validation_set.validations[0].subjectid, waiting_task )
+ end
+ end
+
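
The new create_train_test_plot pools the prediction-feature values of the training and test datasets and hands them to Reports::r_util.double_hist_plot, flagging numeric handling when the endpoint is a regression feature. The pooling step in isolation, with plain hashes standing in for the dataset objects (the sample data is invented; Lib::DatasetCache and data_entries are the real accessors):

  # Hypothetical stand-in for a dataset: compound -> { feature -> [values] }
  train_entries = { "c1" => { "feat" => [0.2] }, "c2" => { "feat" => [0.7] } }
  test_entries  = { "c3" => { "feat" => [0.4] }, "c4" => {} }  # c4: no value

  feature = "feat"
  train, test = [], []
  [[train_entries, train], [test_entries, test]].each do |entries, array|
    entries.each do |_compound, features|
      next unless features[feature]        # skip compounds without a value
      features[feature].each { |val| array << val }
    end
  end

  p train  # => [0.2, 0.7]
  p test   # => [0.4]
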
# creates a roc plot (result is plotted into out_file)
# * if (split_set_attributes == nil?)
@@ -130,31 +163,22 @@ module Reports
end
end
- def self.confidence_plot_class_performance( validation_set, actual_accept_value, predicted_accept_value )
+ def self.confidence_plot_class_performance( validation_set, performance_attribute, performance_accept_value )
true_class = nil
- if actual_accept_value==nil and predicted_accept_value==nil
- perf = "Accuracy"
- elsif actual_accept_value!=nil
- if validation_set.get_true_accept_value==actual_accept_value
- perf = "True Positive Rate"
- true_class = actual_accept_value
- elsif validation_set.get_accept_values.size==2 and validation_set.get_true_accept_value==(validation_set.get_accept_values-[actual_accept_value])[0]
- perf = "True Negative Rate"
+ if performance_accept_value==nil
+ perf = performance_attribute.to_s.nice_attr
+ else
+ invert_true_class = (validation_set.get_accept_values.size==2 and
+ validation_set.get_true_accept_value==(validation_set.get_accept_values-[performance_accept_value])[0])
+ if invert_true_class && performance_attribute==:true_positive_rate
+ perf = :true_negative_rate.to_s.nice_attr
true_class = validation_set.get_true_accept_value
- else
- perf = "True Positive Rate"
- true_class = actual_accept_value
- end
- elsif predicted_accept_value!=nil
- if validation_set.get_true_accept_value==predicted_accept_value
- perf = "Positive Predictive Value"
- true_class = predicted_accept_value
- elsif validation_set.get_accept_values.size==2 and validation_set.get_true_accept_value==(validation_set.get_accept_values-[predicted_accept_value])[0]
- perf = "Negative Predictive Value"
+ elsif invert_true_class && performance_attribute==:positive_predictive_value
+ perf = :negative_predictive_value.to_s.nice_attr
true_class = validation_set.get_true_accept_value
else
- perf = "Positive Predictive Value"
- true_class = predicted_accept_value
+ perf = performance_attribute.to_s.nice_attr
+ true_class = performance_accept_value
end
end
title = perf+" vs Confidence Plot"
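
The rewritten confidence_plot_class_performance takes the performance attribute directly instead of guessing it from actual/predicted accept values; the only remaining special case is relabeling, for binary endpoints, a measure requested for the negative class (a true-positive rate computed for the negative class is the true-negative rate, and PPV likewise becomes NPV). That rule in isolation, with invented class names ("active" assumed to be the positive class):

  # Sketch of the relabeling rule for binary endpoints.
  def relabel(attr, accept_value, accept_values, true_accept_value)
    return attr if accept_value.nil?
    negative_class = accept_values.size == 2 &&
                     accept_value == (accept_values - [true_accept_value])[0]
    return attr unless negative_class
    { :true_positive_rate => :true_negative_rate,
      :positive_predictive_value => :negative_predictive_value }[attr] || attr
  end

  accept_values = ["active", "inactive"]
  p relabel(:true_positive_rate, "inactive", accept_values, "active")
  # => :true_negative_rate
  p relabel(:true_positive_rate, "active", accept_values, "active")
  # => :true_positive_rate
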
@@ -162,12 +186,8 @@ module Reports
{:title =>title, :performance => perf}
end
-
- def self.create_confidence_plot( out_files, validation_set, actual_accept_value = nil,
- predicted_accept_value = nil, split_set_attribute=nil, show_single_curves=false )
+ def self.create_confidence_plot( out_files, validation_set, performance_attribute, performance_accept_value, split_set_attribute=nil, show_single_curves=false )
- raise "param combination not supported" if actual_accept_value!=nil and predicted_accept_value!=nil
-
out_files = [out_files] unless out_files.is_a?(Array)
LOGGER.debug "creating confidence plot for '"+validation_set.size.to_s+"' validations, out-file:"+out_files.inspect
@@ -178,7 +198,7 @@ module Reports
performance = []
attribute_values.each do |value|
begin
- data = transform_confidence_predictions(validation_set.filter({split_set_attribute => value}), actual_accept_value, predicted_accept_value, false)
+ data = transform_confidence_predictions(validation_set.filter({split_set_attribute => value}), performance_attribute, performance_accept_value, false)
names << split_set_attribute.to_s.nice_attr+" "+value.to_s
confidence << data[:confidence][0]
performance << data[:performance][0]
@@ -186,30 +206,47 @@ module Reports
LOGGER.warn "could not create confidence plot for "+value.to_s
end
end
- #RubyPlot::plot_lines(out_file, "Percent Correct vs Confidence Plot", "Confidence", "Percent Correct", names, fp_rates, tp_rates )
out_files.each do |out_file|
- case validation_set.unique_feature_type
- when "classification"
- info = confidence_plot_class_performance( validation_set, actual_accept_value, predicted_accept_value )
- RubyPlot::accuracy_confidence_plot(out_file, info[:title], "Confidence", info[:performance], names, confidence, performance)
- when "regression"
- RubyPlot::accuracy_confidence_plot(out_file, "RMSE vs Confidence Plot", "Confidence", "RMSE", names, confidence, performance, true)
- end
+ info = confidence_plot_class_performance( validation_set, performance_attribute, performance_accept_value )
+ RubyPlot::confidence_plot(out_file, info[:title], "Confidence", info[:performance],
+ names, confidence, performance, CONF_PLOT_RANGE[performance_attribute])
end
else
- data = transform_confidence_predictions(validation_set, actual_accept_value, predicted_accept_value, show_single_curves)
- out_files.each do |out_file|
- case validation_set.unique_feature_type
- when "classification"
- info = confidence_plot_class_performance( validation_set, actual_accept_value, predicted_accept_value )
- RubyPlot::accuracy_confidence_plot(out_file, info[:title], "Confidence", info[:performance], data[:names], data[:confidence], data[:performance])
- when "regression"
- RubyPlot::accuracy_confidence_plot(out_file, "RMSE vs Confidence Plot", "Confidence", "RMSE", data[:names], data[:confidence], data[:performance], true)
- end
+ data = transform_confidence_predictions(validation_set, performance_attribute, performance_accept_value, show_single_curves)
+ out_files.each do |out_file|
+ info = confidence_plot_class_performance( validation_set, performance_attribute, performance_accept_value )
+ RubyPlot::confidence_plot(out_file, info[:title], "Confidence", info[:performance],
+ data[:names], data[:confidence], data[:performance], CONF_PLOT_RANGE[performance_attribute])
end
end
end
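
Callers of create_confidence_plot now name the measure themselves; when split_set_attribute is set, one curve per attribute value is drawn, otherwise a single (optionally fold-fainted) curve. A hypothetical invocation for a crossvalidation report (the file names and the validation_set variable are assumed to exist; nothing here is a fixed API beyond what the diff shows):

  # Hypothetical call: one accuracy-vs-confidence curve per algorithm.
  Reports::PlotFactory.create_confidence_plot(
    ["/tmp/conf.png", "/tmp/conf.svg"],  # rendered once per out-file
    validation_set,                      # a populated validation set
    :accuracy,                           # performance_attribute
    nil,                                 # no per-class accept value
    :algorithm_uri)                      # split attribute: curve per value
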
+ def self.create_box_plot( out_files, validation_set, title_attribute, value_attribute, class_value )
+
+ out_files = [out_files] unless out_files.is_a?(Array)
+ LOGGER.debug "creating box plot, out-files:"+out_files.inspect
+
+ data = {}
+ validation_set.validations.each do |v|
+ value = v.send(value_attribute)
+ if value.is_a?(Hash)
+ if class_value==nil
+ avg_value = 0
+ value.values.each{ |val| avg_value+=val }
+ value = avg_value/value.values.size.to_f
+ else
+ raise "box plot value is hash, but no entry for class-value ("+class_value.to_s+
+ "); value for "+value_attribute.to_s+" -> "+value.inspect unless value.key?(class_value)
+ value = value[class_value]
+ end
+ end
+
+ data[v.send(title_attribute).to_s] = [] unless data[v.send(title_attribute).to_s]
+ data[v.send(title_attribute).to_s] << value
+ end
+
+ Reports::r_util.boxplot( out_files, data)
+ end
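
create_box_plot reduces each validation to one number, averaging per-class hash values unless a class value is requested, and groups the numbers by the title attribute before handing them to R. The grouping in isolation, over invented validation hashes:

  # Sketch: per-validation values grouped by title for a box plot.
  validations = [
    { :algorithm => "lazar",    :accuracy => { "active" => 0.8, "inactive" => 0.6 } },
    { :algorithm => "lazar",    :accuracy => { "active" => 0.9, "inactive" => 0.7 } },
    { :algorithm => "majority", :accuracy => 0.5 },  # plain value: used as-is
  ]

  data = {}
  validations.each do |v|
    value = v[:accuracy]
    if value.is_a?(Hash)  # average over all classes when none is requested
      value = value.values.inject(0.0) { |s, x| s + x } / value.values.size
    end
    (data[v[:algorithm]] ||= []) << value
  end

  p data  # one array of per-validation values per algorithm
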
def self.create_bar_plot( out_files, validation_set, title_attribute, value_attributes )
@@ -349,70 +386,164 @@ module Reports
end
- def self.transform_confidence_predictions(validation_set, actual_accept_value, predicted_accept_value, add_single_folds=false)
+
+ def self.transform_confidence_predictions(validation_set, performance_attribute, performance_accept_value, add_single_folds)
+
+ feature_type = validation_set.unique_feature_type
+ accept_values = validation_set.unique_feature_type=="classification" ? validation_set.get_accept_values : nil
if (validation_set.size > 1)
-
names = []; performance = []; confidence = []; faint = []
sum_confidence_values = { :predicted_values => [], :actual_values => [], :confidence_values => []}
(0..validation_set.size-1).each do |i|
- confidence_values = validation_set.get(i).get_predictions.get_prediction_values(actual_accept_value, predicted_accept_value)
+ confidence_values = validation_set.get(i).get_predictions.get_prediction_values(performance_attribute, performance_accept_value)
sum_confidence_values[:predicted_values] += confidence_values[:predicted_values]
sum_confidence_values[:confidence_values] += confidence_values[:confidence_values]
sum_confidence_values[:actual_values] += confidence_values[:actual_values]
if add_single_folds
begin
- pref_conf_rates = get_performance_confidence_rates(confidence_values)
+ perf_conf_rates = get_performance_confidence_rates(confidence_values, performance_attribute, performance_accept_value,
+ feature_type, accept_values)
names << "fold "+i.to_s
- performance << pref_conf_rates[:performance]
- confidence << pref_conf_rates[:confidence]
+ performance << perf_conf_rates[:performance]
+ confidence << perf_conf_rates[:confidence]
faint << true
rescue
LOGGER.warn "could not get confidence vals for fold "+i.to_s
end
end
end
- pref_conf_rates = get_performance_confidence_rates(sum_confidence_values, validation_set.unique_feature_type)
+ perf_conf_rates = get_performance_confidence_rates(sum_confidence_values, performance_attribute, performance_accept_value,
+ feature_type, accept_values)
names << nil # "all"
- performance << pref_conf_rates[:performance]
- confidence << pref_conf_rates[:confidence]
+ performance << perf_conf_rates[:performance]
+ confidence << perf_conf_rates[:confidence]
faint << false
return { :names => names, :performance => performance, :confidence => confidence, :faint => faint }
else
- confidence_values = validation_set.validations[0].get_predictions.get_prediction_values(actual_accept_value, predicted_accept_value)
- pref_conf_rates = get_performance_confidence_rates(confidence_values, validation_set.unique_feature_type)
- return { :names => [""], :performance => [pref_conf_rates[:performance]], :confidence => [pref_conf_rates[:confidence]] }
+ confidence_values = validation_set.validations[0].get_predictions.get_prediction_values(performance_attribute, performance_accept_value)
+ perf_conf_rates = get_performance_confidence_rates(confidence_values, performance_attribute, performance_accept_value,
+ feature_type, accept_values)
+ return { :names => [""], :performance => [perf_conf_rates[:performance]], :confidence => [perf_conf_rates[:confidence]] }
end
end
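
For sets with more than one validation, transform_confidence_predictions pools the per-fold predicted/actual/confidence arrays into one summary curve (and, if requested, keeps faint single-fold curves alongside). The pooling is plain array concatenation; a sketch over invented fold data:

  # Sketch: pool per-fold prediction values into one summary set.
  folds = [
    { :predicted_values => [1, 0], :actual_values => [1, 1], :confidence_values => [0.9, 0.4] },
    { :predicted_values => [0],    :actual_values => [0],    :confidence_values => [0.7] },
  ]

  sum = { :predicted_values => [], :actual_values => [], :confidence_values => [] }
  folds.each do |fold|
    sum.each_key { |key| sum[key] += fold[key] }
  end

  p sum[:confidence_values]  # => [0.9, 0.4, 0.7]
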
def self.demo_roc_plot
-# roc_values = {:confidence_values => [0.1, 0.9, 0.5, 0.6, 0.6, 0.6],
-# :predicted_values => [1, 0, 0, 1, 0, 1],
-# :actual_values => [0, 1, 0, 0, 1, 1]}
- roc_values = {:confidence_values => [0.9, 0.8, 0.7, 0.6, 0.5, 0.4],
- :true_positives => [1, 1, 1, 0, 1, 0]}
- tp_fp_rates = get_tp_fp_rates(roc_values)
- labels = []
- tp_fp_rates[:youden].each do |point,confidence|
- labels << ["confidence: "+confidence.to_s, point[0], point[1]]
- end
-
+
+ seed = 831 #rand(1000)
+ puts seed
+ srand seed
+
plot_data = []
- plot_data << RubyPlot::LinePlotData.new(:name => "testname", :x_values => tp_fp_rates[:fp_rate], :y_values => tp_fp_rates[:tp_rate], :labels => labels)
+ n = 250
+ a_cutoff = 0.5
+
+ a_real = []
+ a_class = []
+ n.times do |i|
+ a_real << rand
+ a_class << ( a_real[-1]>a_cutoff ? "a" : "b")
+ end
+
+ puts a_real.to_csv
+ puts a_class.to_csv
+
+ p_props = [[],[]]
+ p_classes = []
+
+ 2.times do |index|
+
+ if (index==0)
+ p_noise = 0.15
+ p_cutoff = 0.8
+ else
+ p_noise = 0.5
+ p_cutoff = 0.5
+ end
+
+ p_real = []
+ p_class = []
+ p_prop = []
+ correct = []
+ n.times do |i|
+ if rand<0.04
+ p_real << rand
+ else
+ p_real << (a_real[i] + ((rand * p_noise) * (rand<0.5 ? 1 : -1)))
+ end
+ p_prop << ((p_cutoff-p_real[i]).abs)
+ p_class << ( p_real[-1]>p_cutoff ? "a" : "b")
+ correct << ((p_class[i]==a_class[i]) ? 1 : 0)
+ end
+
+ puts ""
+ puts p_real.to_csv
+ puts p_class.to_csv
+ puts p_prop.to_csv
+
+ p_prop_max = p_prop.max
+ p_prop_min = p_prop.min
+ p_prop_delta = p_prop_max - p_prop_min
+ n.times do |i|
+ p_prop[i] = (p_prop[i] - p_prop_min)/p_prop_delta.to_f
+ p_props[index][i] = p_prop[i]
+ end
+
+ puts p_prop.to_csv
+
+ p_classes << p_class
+
+ (0..n-2).each do |i|
+ (i+1..n-1).each do |j|
+ if p_prop[i]<p_prop[j]
+ tmp = p_prop[i]
+ p_prop[i] = p_prop[j]
+ p_prop[j] = tmp
+ tmp = correct[i]
+ correct[i] = correct[j]
+ correct[j] = tmp
+ end
+ end
+ end
+
+ puts p_prop.to_csv
+ puts correct.to_csv
+ puts "acc: "+(correct.sum/n.to_f).to_s
+
+ roc_values = {:confidence_values => p_prop,
+ :true_positives => correct}
+ tp_fp_rates = get_tp_fp_rates(roc_values)
+ labels = []
+ tp_fp_rates[:youden].each do |point,confidence|
+ labels << ["confidence: "+confidence.to_s, point[0], point[1]]
+ end
+
+ plot_data << RubyPlot::LinePlotData.new(:name => "alg"+index.to_s,
+ :x_values => tp_fp_rates[:fp_rate],
+ :y_values => tp_fp_rates[:tp_rate])
+ #,:labels => labels)
+ end
+
+ puts "instance,class,prediction_1,propability_1,prediction_2,propability_2"
+ n.times do |i|
+ puts (i+1).to_s+","+a_class[i].to_s+","+p_classes[0][i].to_s+
+ ","+p_props[0][i].to_s+
+ ","+p_classes[1][i].to_s+","+p_props[1][i].to_s
+ end
RubyPlot::plot_lines("/tmp/plot.png",
"ROC-Plot",
"False positive rate",
"True Positive Rate", plot_data )
end
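
The rewritten demo fabricates two synthetic classifiers over the same 250 instances, one tightly coupled to the true values (low noise, high cutoff) and one near-random, and plots both ROC curves. The get_tp_fp_rates step it relies on boils down to a cumulative sweep over hits sorted by descending confidence; a minimal standalone version of that sweep (the input is invented, and correctness flags play the role of positives, as in the demo):

  # Minimal ROC sweep: entry i is 1 if the i-th most confident
  # prediction was correct (already sorted by descending confidence).
  true_positives = [1, 1, 1, 0, 1, 0]
  pos = true_positives.count(1).to_f
  neg = true_positives.count(0).to_f

  tp_rate, fp_rate = [0.0], [0.0]
  tp = fp = 0
  true_positives.each do |hit|
    hit == 1 ? tp += 1 : fp += 1
    tp_rate << tp / pos * 100
    fp_rate << fp / neg * 100
  end

  p fp_rate.zip(tp_rate)  # ROC curve points, in percent
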
- def self.get_performance_confidence_rates(roc_values, feature_type)
+ def self.get_performance_confidence_rates(pred_values, performance_attribute, performance_accept_value, feature_type, accept_values)
- c = roc_values[:confidence_values]
- p = roc_values[:predicted_values]
- a = roc_values[:actual_values]
+ c = pred_values[:confidence_values]
+ p = pred_values[:predicted_values]
+ a = pred_values[:actual_values]
raise "no prediction values for confidence plot" if p.size==0
(0..p.size-2).each do |i|
@@ -425,40 +556,28 @@ module Reports
end
end
#puts c.inspect+"\n"+a.inspect+"\n"+p.inspect+"\n\n"
-
perf = []
conf = []
-
- case feature_type
- when "classification"
- count = 0
- correct = 0
- (0..p.size-1).each do |i|
- count += 1
- correct += 1 if p[i]==a[i]
- if i>0 && (c[i]>=conf[-1]-0.00001)
- perf.pop
- conf.pop
- end
- perf << correct/count.to_f * 100
- conf << c[i]
+ predictions = nil
+ (0..p.size-1).each do |i|
+ # melt nearly identical confidence values to get a smoother graph
+ if i>0 && (c[i]>=conf[-1]-0.00001)
+ perf.pop
+ conf.pop
end
- when "regression"
- count = 0
- sum_squared_error = 0
- (0..p.size-1).each do |i|
- count += 1
- sum_squared_error += (p[i]-a[i])**2
- if i>0 && (c[i]>=conf[-1]-0.00001)
- perf.pop
- conf.pop
- end
- perf << Math.sqrt(sum_squared_error/count.to_f)
- conf << c[i]
+ if (predictions == nil)
+ data = {:predicted_values => [p[i]],:actual_values => [a[i]], :confidence_values => [c[i]],
+ :feature_type => feature_type, :accept_values => accept_values}
+ predictions = Lib::Predictions.new(data)
+ else
+ predictions.update_stats(p[i], a[i], c[i])
end
+
+ val = predictions.send(performance_attribute)
+ val = val[performance_accept_value] if val.is_a?(Hash)
+ perf << val
+ conf << c[i]
end
- #puts perf.inspect
-
return {:performance => perf,:confidence => conf}
end
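
This unified loop replaces the separate accuracy (classification) and RMSE (regression) branches: predictions are sorted by descending confidence beforehand, near-identical confidences are melted into one point, and a Lib::Predictions object is grown one prediction at a time via update_stats, then asked for whichever measure the caller requested. The same sweep with a running accuracy standing in for the Predictions object (a sketch, not the real stats class):

  # Sketch: performance-vs-confidence sweep, running accuracy as the measure.
  # Arrays are assumed sorted by descending confidence.
  predicted = [1, 1, 0, 1, 0]
  actual    = [1, 0, 0, 1, 1]
  conf      = [0.9, 0.8, 0.8, 0.5, 0.2]

  perf, conf_out = [], []
  correct = 0
  predicted.each_index do |i|
    correct += 1 if predicted[i] == actual[i]
    # melt nearly identical confidence values for a smoother curve
    if i > 0 && conf[i] >= conf_out[-1] - 0.00001
      perf.pop
      conf_out.pop
    end
    perf << correct / (i + 1).to_f * 100
    conf_out << conf[i]
  end

  p conf_out  # => [0.9, 0.8, 0.5, 0.2]
  p perf      # running accuracy (percent) at each confidence cutoff
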
@@ -553,7 +672,20 @@ end
#require "rubygems"
#require "ruby-plot"
-##Reports::PlotFactory::demo_ranking_plot
+###Reports::PlotFactory::demo_ranking_plot
+#class Array
+# def sum
+# inject( nil ) { |sum,x| sum ? sum+x : x }
+# end
+#
+# def to_csv
+# s = ""
+# each do |x|
+# s += (x.is_a?(Float) ? ("%.3f"%x) : (" "+x.to_s) )+", "
+# end
+# s
+# end
+#end
#Reports::PlotFactory::demo_roc_plot
#a = [1, 0, 1, 2, 3, 0, 2]