author     mguetlein <martin.guetlein@gmail.com>   2011-11-25 09:07:50 +0100
committer  mguetlein <martin.guetlein@gmail.com>   2011-11-25 09:07:50 +0100
commit     cf60c03db2481d3816e63f058a7ed12d905ac833 (patch)
tree       62622b5b087c4e1251fb011f10d6601d33e23af1
parent     95703c1e7d3f6e98a200cf6dfd1cfef3a0ca0479 (diff)
add r-square plot, fix prediction updating, add weighted sample-correlation-coefficient
-rwxr-xr-x  lib/predictions.rb                    173
-rw-r--r--  report/plot_factory.rb                161
-rwxr-xr-x  report/report_content.rb                8
-rwxr-xr-x  report/report_factory.rb               24
-rwxr-xr-x  validation/validation_application.rb   32
5 files changed, 235 insertions(+), 163 deletions(-)
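The headline addition is weighted_sample_correlation_coefficient in lib/predictions.rb, computed from running sums. For reference, the running-sum form of Pearson's r over actual values a_i and predicted values p_i (the formula the existing sample_correlation_coefficient already uses) is

    r = ( n*sum(a_i*p_i) - sum(a_i)*sum(p_i) ) /
        ( sqrt( n*sum(a_i^2) - sum(a_i)^2 ) * sqrt( n*sum(p_i^2) - sum(p_i)^2 ) )

The weighted variant introduced below feeds the confidence-scaled values w_a = c_i*a_i and w_p = c_i*p_i into the same formula. Note that this is the patch's own weighting scheme (scaling the values themselves); it is not the conventional weighted Pearson coefficient, which normalizes by the sum of the weights.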
diff --git a/lib/predictions.rb b/lib/predictions.rb
index 6c0e996..56bdd22 100755
--- a/lib/predictions.rb
+++ b/lib/predictions.rb
@@ -25,9 +25,6 @@ module Lib
feature_type,
accept_values=nil )
- @predicted_values = predicted_values
- @actual_values = actual_values
- @confidence_values = confidence_values
@feature_type = feature_type
@accept_values = accept_values
@num_classes = 1
@@ -38,34 +35,27 @@ module Lib
raise "unknown feature_type: '"+@feature_type.to_s+"'" unless
@feature_type=="classification" || @feature_type=="regression"
- raise "no predictions" if @predicted_values.size == 0
- num_info = "predicted:"+@predicted_values.size.to_s+
- " confidence:"+@confidence_values.size.to_s+" actual:"+@actual_values.size.to_s
- raise "illegal num actual values "+num_info if @actual_values.size != @predicted_values.size
- raise "illegal num confidence values "+num_info if @confidence_values.size != @predicted_values.size
-
- @confidence_values.each{ |c| raise "illegal confidence value: '"+c.to_s+"'" unless c==nil or (c.is_a?(Numeric) and c>=0 and c<=1) }
+ raise "no predictions" if predicted_values.size == 0
+ num_info = "predicted:"+predicted_values.size.to_s+
+ " confidence:"+confidence_values.size.to_s+" actual:"+actual_values.size.to_s
+ raise "illegal num actual values "+num_info if actual_values.size != predicted_values.size
+ raise "illegal num confidence values "+num_info if confidence_values.size != predicted_values.size
case @feature_type
when "classification"
raise "accept_values missing while performing classification" unless @accept_values
@num_classes = @accept_values.size
raise "num classes < 2" if @num_classes<2
- { "predicted"=>@predicted_values, "actual"=>@actual_values }.each do |s,values|
- values.each{ |v| raise "illegal "+s+" classification-value ("+v.to_s+"),"+
- "has to be either nil or index of predicted-values" if v!=nil and (!v.is_a?(Numeric) or v<0 or v>@num_classes)}
- end
when "regression"
raise "accept_values != nil while performing regression" if @accept_values
- { "predicted"=>@predicted_values, "actual"=>@actual_values }.each do |s,values|
- values.each{ |v| raise "illegal "+s+" regression-value ("+v.to_s+"),"+
- " has to be either nil or number (not NaN, not Infinite)" unless v==nil or (v.is_a?(Numeric) and !v.nan? and v.finite?)}
- end
end
+ @predicted_values = []
+ @actual_values = []
+ @confidence_values = []
init_stats()
- (0..@predicted_values.size-1).each do |i|
- update_stats( @predicted_values[i], @actual_values[i], @confidence_values[i] )
+ (0..predicted_values.size-1).each do |i|
+ update_stats( predicted_values[i], actual_values[i], confidence_values[i] )
end
end
@@ -114,6 +104,13 @@ module Lib
@sum_squares_actual = 0
@sum_squares_predicted = 0
+ @sum_confidence = 0
+ @weighted_sum_actual = 0
+ @weighted_sum_predicted = 0
+ @weighted_sum_multiply = 0
+ @weighted_sum_squares_actual = 0
+ @weighted_sum_squares_predicted = 0
+
@sum_weighted_abs_error = 0
@sum_weighted_squared_error = 0
end
@@ -121,6 +118,25 @@ module Lib
def update_stats( predicted_value, actual_value, confidence_value )
+ raise "illegal confidence value: '"+confidence_value.to_s+"'" unless
+ confidence_value==nil or (confidence_value.is_a?(Numeric) and confidence_value>=0 and confidence_value<=1)
+ case @feature_type
+ when "classification"
+ { "predicted"=>predicted_value, "actual"=>actual_value }.each do |s,v|
+ raise "illegal "+s+" classification-value ("+v.to_s+"),"+
+ "has to be either nil or index of predicted-values" if v!=nil and (!v.is_a?(Numeric) or v<0 or v>@num_classes)
+ end
+ when "regression"
+ { "predicted"=>predicted_value, "actual"=>actual_value }.each do |s,v|
+ raise "illegal "+s+" regression-value ("+v.to_s+"),"+
+ " has to be either nil or number (not NaN, not Infinite)" unless v==nil or (v.is_a?(Numeric) and !v.nan? and v.finite?)
+ end
+ end
+
+ @predicted_values << predicted_value
+ @actual_values << actual_value
+ @confidence_values << confidence_value
+
if actual_value==nil
@num_no_actual_value += 1
else
@@ -165,6 +181,16 @@ module Lib
@sum_multiply += (actual_value*predicted_value)
@sum_squares_actual += actual_value**2
@sum_squares_predicted += predicted_value**2
+
+ if @conf_provided
+ w_a = actual_value * confidence_value
+ w_p = predicted_value * confidence_value
+ @weighted_sum_actual += w_a
+ @weighted_sum_predicted += w_p
+ @weighted_sum_multiply += (w_a*w_p) if @conf_provided
+ @weighted_sum_squares_actual += w_a**2 if @conf_provided
+ @weighted_sum_squares_predicted += w_p**2 if @conf_provided
+ end
end
end
end
@@ -514,7 +540,7 @@ module Lib
return @sum_squared_error
end
- def r_square
+ def r_square #_old
#return sample_correlation_coefficient ** 2
# see http://en.wikipedia.org/wiki/Coefficient_of_determination#Definitions
@@ -525,7 +551,7 @@ module Lib
( r_2.infinite? || r_2.nan? ) ? 0 : r_2
end
- def weighted_r_square
+ def weighted_r_square #_old
return 0 unless confidence_values_available?
ss_tot = weighted_total_sum_of_squares
return 0 if ss_tot==0
@@ -533,6 +559,16 @@ module Lib
( r_2.infinite? || r_2.nan? ) ? 0 : r_2
end
+ #def r_square
+ # # as implemented in R
+ # return sample_correlation_coefficient ** 2
+ #end
+
+ #def weighted_r_square
+ # # as implemented in R
+ # return weighted_sample_correlation_coefficient ** 2
+ #end
+
def sample_correlation_coefficient
begin
# formula see http://en.wikipedia.org/wiki/Correlation_and_dependence#Pearson.27s_product-moment_coefficient
@@ -543,6 +579,16 @@ module Lib
rescue; 0; end
end
+ def weighted_sample_correlation_coefficient
+ begin
+ # formula see http://en.wikipedia.org/wiki/Correlation_and_dependence#Pearson.27s_product-moment_coefficient
+ scc = ( @num_predicted * @weighted_sum_multiply - @weighted_sum_actual * @weighted_sum_predicted ) /
+ ( Math.sqrt( @num_predicted * @weighted_sum_squares_actual - @weighted_sum_actual**2 ) *
+ Math.sqrt( @num_predicted * @weighted_sum_squares_predicted - @weighted_sum_predicted**2 ) )
+ ( scc.infinite? || scc.nan? ) ? 0 : scc
+ rescue; 0; end
+ end
+
def total_sum_of_squares
#return @variance_actual * ( @num_predicted - 1 )
sum = 0
@@ -608,17 +654,23 @@ module Lib
return h
end
- def get_prediction_values(actual_accept_value, predicted_accept_value)
+ def get_prediction_values(performance_attr, performance_accept_value)
#puts "get_roc_values for class_value: "+class_value.to_s
raise "no confidence values" unless confidence_values_available?
#raise "no class-value specified" if class_value==nil
+ actual_accept_value = nil
+ predicted_accept_value = nil
+ if performance_attr==:true_positive_rate
+ actual_accept_value = performance_accept_value
+ elsif performance_attr==:positive_predictive_value
+ predicted_accept_value = performance_accept_value
+ end
actual_class_index = @accept_values.index(actual_accept_value) if actual_accept_value!=nil
raise "class not found '"+actual_accept_value.to_s+"' in "+@accept_values.inspect if (actual_accept_value!=nil && actual_class_index==nil)
-
predicted_class_index = @accept_values.index(predicted_accept_value) if predicted_accept_value!=nil
- raise "class not found "+predicted_accept_value.to_s+" in "+@accept_values.inspect if (predicted_accept_value!=nil && predicted_class_index==nil)
+ raise "class not found '"+predicted_accept_value.to_s+"' in "+@accept_values.inspect if (predicted_accept_value!=nil && predicted_class_index==nil)
c = []; p = []; a = []
(0..@predicted_values.size-1).each do |i|
@@ -697,6 +749,67 @@ module Lib
#end
private
+ def self.test_update
+ p=[0.4,0.2,0.3,0.5,0.8]
+ a=[0.45,0.21,0.25,0.55,0.75]
+ c = Array.new(p.size)
+ pred = Predictions.new(p,a,c,"regression")
+ puts pred.r_square
+
+ pred = nil
+ p.size.times do |i|
+ if pred==nil
+ pred = Predictions.new([p[0]],[a[0]],[c[0]],"regression")
+ else
+ pred.update_stats(p[i],a[i],c[i])
+ end
+ puts pred.r_square
+ end
+ end
+
+ def self.test_r_square
+ require "rubygems"
+ require "opentox-ruby"
+
+ max_deviation = rand * 0.9
+ avg_deviation = max_deviation * 0.5
+
+ p = []
+ a = []
+ c = []
+ (100 + rand(1000)).times do |i|
+ r = rand
+ deviation = rand * max_deviation
+ a << r
+ p << r + ((rand<0.5 ? -1 : 1) * deviation)
+ #c << 0.5
+ if (deviation > avg_deviation)
+ c << 0.4
+ else
+ c << 0.6
+ end
+ #puts a[-1].to_s+" "+p[-1].to_s
+ end
+ puts "num values "+p.size.to_s
+
+ pred = Predictions.new(p,a,c,"regression")
+ puts "internal"
+ #puts "r-square old "+pred.r_square_old.to_s
+ puts "cor "+pred.sample_correlation_coefficient.to_s
+ puts "weighted cor "+pred.weighted_sample_correlation_coefficient.to_s
+ puts "r-square "+pred.r_square.to_s
+
+ puts "R"
+ @@r = RinRuby.new(true,false) unless defined?(@@r) and @@r
+ @@r.assign "v1",a
+ @@r.assign "v2",p
+ puts "r cor "+@@r.pull("cor(v1,v2)").to_s
+ @@r.eval "fit <- lm(v1 ~ v2)"
+ @@r.eval "sum <- summary(fit)"
+ puts "r r-square "+@@r.pull("sum$r.squared").to_s
+ puts "r adjusted-r-square "+@@r.pull("sum$adj.r.squared").to_s
+ end
+
def prediction_feature_value_map(proc)
res = {}
(0..@num_classes-1).each do |i|
@@ -706,4 +819,12 @@ module Lib
end
end
-end
\ No newline at end of file
+end
+
+#class Float
+# def to_s
+# "%.5f" % self
+# end
+#end
+##Lib::Predictions.test_update
+#Lib::Predictions.test_r_square
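With the constructor change above, every prediction now flows through update_stats, so a Predictions object can be built once and then grown one prediction at a time (this is exactly what test_update checks, printing r_square after each update). A minimal standalone sketch of the running-sum bookkeeping for the regression case, with hypothetical names mirroring the instance variables in the patch:

    # Standalone sketch (not part of the patch) of the incremental
    # correlation bookkeeping that Predictions#update_stats performs.
    class RunningCorrelation
      def initialize
        @n = 0
        @sum_a = @sum_p = @sum_ap = @sum_a2 = @sum_p2 = 0.0
      end

      # mirrors the regression branch of update_stats
      def update(predicted, actual)
        @n += 1
        @sum_a  += actual
        @sum_p  += predicted
        @sum_ap += actual * predicted
        @sum_a2 += actual ** 2
        @sum_p2 += predicted ** 2
      end

      # running-sum form of Pearson's r, as in sample_correlation_coefficient
      def correlation
        r = (@n * @sum_ap - @sum_a * @sum_p) /
            (Math.sqrt(@n * @sum_a2 - @sum_a ** 2) *
             Math.sqrt(@n * @sum_p2 - @sum_p ** 2))
        (r.infinite? || r.nan?) ? 0 : r
      end
    end

    rc = RunningCorrelation.new
    [[0.4, 0.45], [0.2, 0.21], [0.3, 0.25]].each { |p, a| rc.update(p, a) }
    puts rc.correlation

Updating with further points yields the same value Predictions.new would report for the full arrays, which is what makes the incremental confidence plots below possible.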
diff --git a/report/plot_factory.rb b/report/plot_factory.rb
index 2074ce5..6083d26 100644
--- a/report/plot_factory.rb
+++ b/report/plot_factory.rb
@@ -2,6 +2,10 @@ ENV['JAVA_HOME'] = "/usr/bin" unless ENV['JAVA_HOME']
ENV['PATH'] = ENV['JAVA_HOME']+":"+ENV['PATH'] unless ENV['PATH'].split(":").index(ENV['JAVA_HOME'])
ENV['RANK_PLOTTER_JAR'] = "RankPlotter/RankPlotter.jar" unless ENV['RANK_PLOTTER_JAR']
+CONF_PLOT_RANGE = { :accuracy => [0.45,1.05], :true_positive_rate => [0.45,1.05],:true_negative_rate => [0.45,1.05],
+ :false_positive_rate => [0.45,1.05], :false_negative_rate => [0.45,1.05], :positive_predictive_value => [0.45,1.05],
+ :negative_predictive_value => [0.45,1.05], :r_square => [0, 1.05], :sample_correlation_coefficient => [0, 1.05] }
+
class Array
def swap!(i,j)
tmp = self[i]
@@ -47,7 +51,6 @@ class Array
end
end
-
module Reports
module PlotFactory
@@ -81,9 +84,11 @@ module Reports
y_i = valid_indices.collect{ |i| y_i[i] }
end
- names << ( name_attribute==:crossvalidation_fold ? "fold " : "" ) + v.send(name_attribute).to_s
- x << x_i
- y << y_i
+ if x_i.size>0
+ names << ( name_attribute==:crossvalidation_fold ? "fold " : "" ) + v.send(name_attribute).to_s
+ x << x_i
+ y << y_i
+ end
end
names = [""] if names.size==1
@@ -130,31 +135,22 @@ module Reports
end
end
- def self.confidence_plot_class_performance( validation_set, actual_accept_value, predicted_accept_value )
+ def self.confidence_plot_class_performance( validation_set, performance_attribute, performance_accept_value )
true_class = nil
- if actual_accept_value==nil and predicted_accept_value==nil
- perf = "Accuracy"
- elsif actual_accept_value!=nil
- if validation_set.get_true_accept_value==actual_accept_value
- perf = "True Positive Rate"
- true_class = actual_accept_value
- elsif validation_set.get_accept_values.size==2 and validation_set.get_true_accept_value==(validation_set.get_accept_values-[actual_accept_value])[0]
- perf = "True Negative Rate"
+ if performance_accept_value==nil
+ perf = performance_attribute.to_s.nice_attr
+ else
+ invert_true_class = (validation_set.get_accept_values.size==2 and
+ validation_set.get_true_accept_value==(validation_set.get_accept_values-[performance_accept_value])[0])
+ if invert_true_class && performance_attribute==:true_positive_rate
+ perf = :true_negative_rate.to_s.nice_attr
true_class = validation_set.get_true_accept_value
- else
- perf = "True Positive Rate"
- true_class = actual_accept_value
- end
- elsif predicted_accept_value!=nil
- if validation_set.get_true_accept_value==predicted_accept_value
- perf = "Positive Predictive Value"
- true_class = predicted_accept_value
- elsif validation_set.get_accept_values.size==2 and validation_set.get_true_accept_value==(validation_set.get_accept_values-[predicted_accept_value])[0]
- perf = "Negative Predictive Value"
+ elsif invert_true_class && performance_attribute==:positive_predictive_value
+ perf = :negative_predictive_value.to_s.nice_attr
true_class = validation_set.get_true_accept_value
else
- perf = "Positive Predictive Value"
- true_class = predicted_accept_value
+ perf = performance_attribute.to_s.nice_attr
+ true_class = performance_accept_value
end
end
title = perf+" vs Confidence Plot"
@@ -162,12 +158,8 @@ module Reports
{:title =>title, :performance => perf}
end
-
- def self.create_confidence_plot( out_files, validation_set, actual_accept_value = nil,
- predicted_accept_value = nil, split_set_attribute=nil, show_single_curves=false )
+ def self.create_confidence_plot( out_files, validation_set, performance_attribute, performance_accept_value, split_set_attribute=nil, show_single_curves=false )
- raise "param combination not supported" if actual_accept_value!=nil and predicted_accept_value!=nil
-
out_files = [out_files] unless out_files.is_a?(Array)
LOGGER.debug "creating confidence plot for '"+validation_set.size.to_s+"' validations, out-file:"+out_files.inspect
@@ -178,7 +170,7 @@ module Reports
performance = []
attribute_values.each do |value|
begin
- data = transform_confidence_predictions(validation_set.filter({split_set_attribute => value}), actual_accept_value, predicted_accept_value, false)
+ data = transform_confidence_predictions(validation_set.filter({split_set_attribute => value}), performance_attribute, performance_accept_value, false)
names << split_set_attribute.to_s.nice_attr+" "+value.to_s
confidence << data[:confidence][0]
performance << data[:performance][0]
@@ -186,31 +178,21 @@ module Reports
LOGGER.warn "could not create confidence plot for "+value.to_s
end
end
- #RubyPlot::plot_lines(out_file, "Percent Correct vs Confidence Plot", "Confidence", "Percent Correct", names, fp_rates, tp_rates )
out_files.each do |out_file|
- case validation_set.unique_feature_type
- when "classification"
- info = confidence_plot_class_performance( validation_set, actual_accept_value, predicted_accept_value )
- RubyPlot::accuracy_confidence_plot(out_file, info[:title], "Confidence", info[:performance], names, confidence, performance)
- when "regression"
- RubyPlot::accuracy_confidence_plot(out_file, "RMSE vs Confidence Plot", "Confidence", "RMSE", names, confidence, performance, true)
- end
+ info = confidence_plot_class_performance( validation_set, performance_attribute, performance_accept_value )
+ RubyPlot::confidence_plot(out_file, info[:title], "Confidence", info[:performance],
+ names, confidence, performance, CONF_PLOT_RANGE[performance_attribute])
end
else
- data = transform_confidence_predictions(validation_set, actual_accept_value, predicted_accept_value, show_single_curves)
- out_files.each do |out_file|
- case validation_set.unique_feature_type
- when "classification"
- info = confidence_plot_class_performance( validation_set, actual_accept_value, predicted_accept_value )
- RubyPlot::accuracy_confidence_plot(out_file, info[:title], "Confidence", info[:performance], data[:names], data[:confidence], data[:performance])
- when "regression"
- RubyPlot::accuracy_confidence_plot(out_file, "RMSE vs Confidence Plot", "Confidence", "RMSE", data[:names], data[:confidence], data[:performance], true)
- end
+ data = transform_confidence_predictions(validation_set, performance_attribute, performance_accept_value, show_single_curves)
+ out_files.each do |out_file|
+ info = confidence_plot_class_performance( validation_set, performance_attribute, performance_accept_value )
+ RubyPlot::confidence_plot(out_file, info[:title], "Confidence", info[:performance],
+ data[:names], data[:confidence], data[:performance], CONF_PLOT_RANGE[performance_attribute])
end
end
end
-
def self.create_bar_plot( out_files, validation_set, title_attribute, value_attributes )
out_files = [out_files] unless out_files.is_a?(Array)
@@ -349,7 +331,11 @@ module Reports
end
- def self.transform_confidence_predictions(validation_set, actual_accept_value, predicted_accept_value, add_single_folds=false)
+
+ def self.transform_confidence_predictions(validation_set, performance_attribute, performance_accept_value, add_single_folds)
+
+ feature_type = validation_set.unique_feature_type
+ accept_values = validation_set.unique_feature_type=="classification" ? validation_set.get_accept_values : nil
if (validation_set.size > 1)
@@ -357,34 +343,37 @@ module Reports
sum_confidence_values = { :predicted_values => [], :actual_values => [], :confidence_values => []}
(0..validation_set.size-1).each do |i|
- confidence_values = validation_set.get(i).get_predictions.get_prediction_values(actual_accept_value, predicted_accept_value)
+ confidence_values = validation_set.get(i).get_predictions.get_prediction_values(performance_attribute, performance_accept_value)
sum_confidence_values[:predicted_values] += confidence_values[:predicted_values]
sum_confidence_values[:confidence_values] += confidence_values[:confidence_values]
sum_confidence_values[:actual_values] += confidence_values[:actual_values]
if add_single_folds
begin
- pref_conf_rates = get_performance_confidence_rates(confidence_values)
+ perf_conf_rates = get_performance_confidence_rates(confidence_values, performance_attribute, performance_accept_value,
+ feature_type, accept_values)
names << "fold "+i.to_s
- performance << pref_conf_rates[:performance]
- confidence << pref_conf_rates[:confidence]
+ performance << perf_conf_rates[:performance]
+ confidence << perf_conf_rates[:confidence]
faint << true
rescue
LOGGER.warn "could not get confidence vals for fold "+i.to_s
end
end
end
- pref_conf_rates = get_performance_confidence_rates(sum_confidence_values, validation_set.unique_feature_type)
+ perf_conf_rates = get_performance_confidence_rates(sum_confidence_values, performance_attribute, performance_accept_value,
+ feature_type, accept_values)
names << nil # "all"
- performance << pref_conf_rates[:performance]
- confidence << pref_conf_rates[:confidence]
+ performance << perf_conf_rates[:performance]
+ confidence << perf_conf_rates[:confidence]
faint << false
return { :names => names, :performance => performance, :confidence => confidence, :faint => faint }
else
- confidence_values = validation_set.validations[0].get_predictions.get_prediction_values(actual_accept_value, predicted_accept_value)
- pref_conf_rates = get_performance_confidence_rates(confidence_values, validation_set.unique_feature_type)
- return { :names => [""], :performance => [pref_conf_rates[:performance]], :confidence => [pref_conf_rates[:confidence]] }
+ confidence_values = validation_set.validations[0].get_predictions.get_prediction_values(performance_attribute, performance_accept_value)
+ perf_conf_rates = get_performance_confidence_rates(confidence_values, performance_attribute, performance_accept_value,
+ feature_type, accept_values)
+ return { :names => [""], :performance => [perf_conf_rates[:performance]], :confidence => [perf_conf_rates[:confidence]] }
end
end
@@ -408,11 +397,11 @@ module Reports
"True Positive Rate", plot_data )
end
- def self.get_performance_confidence_rates(roc_values, feature_type)
+ def self.get_performance_confidence_rates(pred_values, performance_attribute, performance_accept_value, feature_type, accept_values)
- c = roc_values[:confidence_values]
- p = roc_values[:predicted_values]
- a = roc_values[:actual_values]
+ c = pred_values[:confidence_values]
+ p = pred_values[:predicted_values]
+ a = pred_values[:actual_values]
raise "no prediction values for confidence plot" if p.size==0
(0..p.size-2).each do |i|
@@ -425,40 +414,26 @@ module Reports
end
end
#puts c.inspect+"\n"+a.inspect+"\n"+p.inspect+"\n\n"
-
perf = []
conf = []
-
- case feature_type
- when "classification"
- count = 0
- correct = 0
- (0..p.size-1).each do |i|
- count += 1
- correct += 1 if p[i]==a[i]
- if i>0 && (c[i]>=conf[-1]-0.00001)
- perf.pop
- conf.pop
- end
- perf << correct/count.to_f * 100
- conf << c[i]
+ predictions = nil
+ (0..p.size-1).each do |i|
+ # melt nearly identical confidence values to get a smoother graph
+ if i>0 && (c[i]>=conf[-1]-0.00001)
+ perf.pop
+ conf.pop
end
- when "regression"
- count = 0
- sum_squared_error = 0
- (0..p.size-1).each do |i|
- count += 1
- sum_squared_error += (p[i]-a[i])**2
- if i>0 && (c[i]>=conf[-1]-0.00001)
- perf.pop
- conf.pop
- end
- perf << Math.sqrt(sum_squared_error/count.to_f)
- conf << c[i]
+ if (predictions == nil)
+ predictions = Lib::Predictions.new([p[i]],[a[i]],[c[i]],feature_type, accept_values)
+ else
+ predictions.update_stats(p[i], a[i], c[i])
end
+
+ val = predictions.send(performance_attribute)
+ val = val[performance_accept_value] if val.is_a?(Hash)
+ perf << val
+ conf << c[i]
end
- #puts perf.inspect
-
return {:performance => perf,:confidence => conf}
end
diff --git a/report/report_content.rb b/report/report_content.rb
index 8d6d44b..61db340 100755
--- a/report/report_content.rb
+++ b/report/report_content.rb
@@ -156,6 +156,7 @@ class Reports::ReportContent
section_text += "\nWARNING: regression plot information not available for all validation results" if prediction_set.size!=validation_set.size
@xml_report.add_paragraph(section_regr, section_text) if section_text
+
begin
log_str = (log ? "_log" : "")
plot_png = add_tmp_file("regr_plot"+log_str, "png")
@@ -213,8 +214,8 @@ class Reports::ReportContent
end
def add_confidence_plot( validation_set,
- actual_accept_value = nil,
- predicted_accept_value = nil,
+ performance_attribute,
+ performance_accept_value,
split_set_attribute = nil,
image_title = "Confidence Plot",
section_text="")
@@ -234,7 +235,8 @@ class Reports::ReportContent
begin
plot_png = add_tmp_file("conf_plot", "png")
plot_svg = add_tmp_file("conf_plot", "svg")
- Reports::PlotFactory.create_confidence_plot( [plot_png[:path], plot_svg[:path]], prediction_set, actual_accept_value, predicted_accept_value, split_set_attribute, false )
+ Reports::PlotFactory.create_confidence_plot( [plot_png[:path], plot_svg[:path]], prediction_set, performance_attribute,
+ performance_accept_value, split_set_attribute, false )
@xml_report.add_imagefigure(section_conf, image_title, plot_png[:name], "PNG", 100, plot_svg[:name])
rescue Exception => ex
msg = "WARNING could not create confidence plot: "+ex.message
diff --git a/report/report_factory.rb b/report/report_factory.rb
index 9995b42..484cf12 100755
--- a/report/report_factory.rb
+++ b/report/report_factory.rb
@@ -85,10 +85,10 @@ module Reports::ReportFactory
report.align_last_two_images "ROC Plots"
end
end
- report.add_confidence_plot(validation_set)
+ report.add_confidence_plot(validation_set, :accuracy, nil)
validation_set.get_accept_values.each do |accept_value|
- report.add_confidence_plot(validation_set, accept_value, nil)
- report.add_confidence_plot(validation_set, nil, accept_value)
+ report.add_confidence_plot(validation_set, :true_positive_rate, accept_value)
+ report.add_confidence_plot(validation_set, :positive_predictive_value, accept_value)
report.align_last_two_images "Confidence Plots"
end
report.end_section
@@ -96,7 +96,9 @@ module Reports::ReportFactory
report.add_result(validation_set, [:validation_uri] + VAL_ATTR_TRAIN_TEST + VAL_ATTR_REGR, "Results", "Results")
report.add_section("Plots")
report.add_regression_plot(validation_set, :model_uri)
- report.add_confidence_plot(validation_set)
+ report.add_confidence_plot(validation_set, :root_mean_squared_error, nil)
+ report.add_confidence_plot(validation_set, :r_square, nil)
+ report.align_last_two_images "Confidence Plots"
report.end_section
end
task.progress(90) if task
@@ -146,10 +148,10 @@ module Reports::ReportFactory
report.align_last_two_images "ROC Plots"
end
end
- report.add_confidence_plot(validation_set,nil,nil,split_attribute)
+ report.add_confidence_plot(validation_set,:accuracy,nil,split_attribute)
validation_set.get_accept_values.each do |accept_value|
- report.add_confidence_plot(validation_set, accept_value, nil,split_attribute)
- report.add_confidence_plot(validation_set, nil, accept_value,split_attribute)
+ report.add_confidence_plot(validation_set, :true_positive_rate, accept_value, split_attribute)
+ report.add_confidence_plot(validation_set, :positive_predictive_value, accept_value, split_attribute)
report.align_last_two_images "Confidence Plots"
end
end
@@ -160,8 +162,12 @@ module Reports::ReportFactory
report.add_result(cv_set, [:crossvalidation_uri]+VAL_ATTR_CV+VAL_ATTR_REGR-[:crossvalidation_fold],res_titel, res_titel, res_text)
report.add_section("Plots")
report.add_regression_plot(validation_set, :crossvalidation_fold)
- report.add_confidence_plot(validation_set)
- report.add_confidence_plot(validation_set, nil, :crossvalidation_fold)
+ report.add_confidence_plot(validation_set, :root_mean_squared_error, nil)
+ report.add_confidence_plot(validation_set, :r_square, nil)
+ report.align_last_two_images "Confidence Plots"
+ report.add_confidence_plot(validation_set, :root_mean_squared_error, nil, :crossvalidation_fold)
+ report.add_confidence_plot(validation_set, :r_square, nil, :crossvalidation_fold)
+ report.align_last_two_images "Confidence Plots Across Folds"
report.end_section
report.add_result(validation_set, [:validation_uri, :validation_report_uri]+VAL_ATTR_CV+VAL_ATTR_REGR-[:num_folds, :dataset_uri, :algorithm_uri], "Results","Results")
end
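The per-class plots wired up here pass a metric symbol plus an accept value (e.g. :true_positive_rate for one class). Inside plot_factory, metrics that Predictions reports per class come back as a Hash, and the requested class's curve is picked out by the val.is_a?(Hash) check shown above. Illustrated with hypothetical values:

    # hypothetical illustration of the Hash-valued metric resolution in
    # get_performance_confidence_rates; the per-class Hash shape is assumed
    val = { "active" => 0.8, "inactive" => 0.6 } # e.g. predictions.send(:true_positive_rate)
    val = val["active"] if val.is_a?(Hash)       # keep the curve for the requested class
    puts val                                     # => 0.8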
diff --git a/validation/validation_application.rb b/validation/validation_application.rb
index 4b6763a..4b2a2d9 100755
--- a/validation/validation_application.rb
+++ b/validation/validation_application.rb
@@ -570,38 +570,6 @@ post '/validate_datasets' do
return_task(task)
end
-get '/:id/verify_r_square' do
-
- #PENDING: this is debug code, move to test-suite
-
- validation = Validation::Validation.get(params[:id])
- p = validation.compute_validation_stats_with_model(nil, true)
-
- puts "actual "+p.actual_values.inspect
- puts "predicted "+p.predicted_values.inspect
- puts ""
-
- puts "ot r-square "+p.r_square.to_s
- puts "ot sample_correlation_coefficient "+p.sample_correlation_coefficient.to_s
- puts "ot sample_correlation_coefficient**2 "+(p.sample_correlation_coefficient**2).to_s
- puts ""
-
- @@r = RinRuby.new(true,false) unless defined?(@@r) and @@r
- @@r.assign "v1",p.actual_values
- @@r.assign "v2",p.predicted_values
- puts "r cor "+@@r.pull("cor(v1,v2)").to_s
- # @@r.eval "ttest = t.test(v1,v2,paired=T)"
- # t = @@r.pull "ttest$statistic"
- @@r.eval "fit <- lm(v1 ~ v2)"
- @@r.eval "sum <- summary(fit)"
- puts "r r-square "+@@r.pull("sum$r.squared").to_s
- puts "r adjusted-r-square "+@@r.pull("sum$adj.r.squared").to_s
-
- @@r.quit
- @@r = nil
-
-end
-
get '/:id/predictions' do
LOGGER.info "get validation predictions "+params.inspect
begin