author     mguetlein <martin.guetlein@gmail.com>   2011-08-18 10:38:51 +0200
committer  mguetlein <martin.guetlein@gmail.com>   2011-08-18 10:38:51 +0200
commit     d27d53d98238ede80fc3b1a0c277ca890a84c736 (patch)
tree       c40f2952c7b569976f5de8e754937e85c9a75ed6 /report
parent     01cc1d014f1f9ccdeb5925e3fa7d64b2d06c2085 (diff)
fix ROC stuff, rename weighted_auc to average_auc
Diffstat (limited to 'report')
-rw-r--r--  report/plot_factory.rb     37
-rwxr-xr-x  report/report_content.rb    5
-rwxr-xr-x  report/report_factory.rb   36
-rwxr-xr-x  report/validation_data.rb   4
4 files changed, 41 insertions, 41 deletions
diff --git a/report/plot_factory.rb b/report/plot_factory.rb
index bf59960..27e934d 100644
--- a/report/plot_factory.rb
+++ b/report/plot_factory.rb
@@ -294,15 +294,14 @@ module Reports
private
def self.transform_roc_predictions(validation_set, class_value, add_label=true )
if (validation_set.size > 1)
- values = { :predicted_values => [], :actual_values => [], :confidence_values => []}
+ values = { :true_positives => [], :confidence_values => []}
(0..validation_set.size-1).each do |i|
- roc_values = validation_set.get(i).get_predictions.get_prediction_values(class_value)
- values[:predicted_values] += roc_values[:predicted_values]
+ roc_values = validation_set.get(i).get_predictions.get_roc_prediction_values(class_value)
+ values[:true_positives] += roc_values[:true_positives]
values[:confidence_values] += roc_values[:confidence_values]
- values[:actual_values] += roc_values[:actual_values]
end
else
- values = validation_set.validations[0].get_predictions.get_prediction_values(class_value)
+ values = validation_set.validations[0].get_predictions.get_roc_prediction_values(class_value)
end
tp_fp_rates = get_tp_fp_rates(values)
labels = []
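
This first hunk folds the separate predicted/actual arrays of get_prediction_values into a single per-prediction flag returned by the new get_roc_prediction_values. A minimal sketch of the return shape that appears to be assumed here (the accessors actual_values, predicted_values and confidence_values are illustrative assumptions, not taken from this commit):

# Hypothetical sketch, not library code: one entry per prediction, 1 if the
# prediction counts as a hit for class_value, 0 if it counts as a miss,
# plus the parallel confidence values.
def get_roc_prediction_values(class_value)
  tp, conf = [], []
  actual_values.each_with_index do |actual, i|
    next if predicted_values[i] == nil      # skip unpredicted compounds
    tp << (actual == class_value ? 1 : 0)   # assumed hit criterion
    conf << confidence_values[i]
  end
  { :true_positives => tp, :confidence_values => conf }
end
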
@@ -357,8 +356,7 @@ module Reports
# :predicted_values => [1, 0, 0, 1, 0, 1],
# :actual_values => [0, 1, 0, 0, 1, 1]}
roc_values = {:confidence_values => [0.9, 0.8, 0.7, 0.6, 0.5, 0.4],
- :predicted_values => [1, 1, 1, 1, 1, 1],
- :actual_values => [1, 0, 1, 0, 1, 0]}
+ :true_positives => [1, 1, 1, 0, 1, 0]}
tp_fp_rates = get_tp_fp_rates(roc_values)
labels = []
tp_fp_rates[:youden].each do |point,confidence|
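
The :youden entry pairs ROC points with the confidence cutoffs that produce them; Youden's index J = tp_rate - fp_rate (equivalently sensitivity + specificity - 1) marks the cutoff furthest above the diagonal. A small illustration with made-up parallel arrays (rates in percent):

# Hypothetical illustration of picking the Youden-optimal cutoff.
tp_rate = [0, 50, 100, 100]
fp_rate = [0,  0,  25, 100]
cutoffs = [1.0, 0.8, 0.6, 0.4]
best = tp_rate.zip(fp_rate, cutoffs).max_by { |tp, fp, _c| tp - fp }
# => [100, 25, 0.6], i.e. J peaks (75) at the confidence cutoff 0.6
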
@@ -431,16 +429,15 @@ module Reports
def self.get_tp_fp_rates(roc_values)
c = roc_values[:confidence_values]
- p = roc_values[:predicted_values]
- a = roc_values[:actual_values]
- raise "no prediction values for roc-plot" if p.size==0
+ tp = roc_values[:true_positives]
+ raise "no prediction values for roc-plot" if tp.size==0
# hack for painting perfect/worst roc curve, otherwise fp/tp-rate will always be 100%
# determine if perfect/worst roc curve
fp_found = false
tp_found = false
- (0..p.size-1).each do |i|
- if a[i]!=p[i]
+ (0..tp.size-1).each do |i|
+ if tp[i]==0
fp_found |= true
else
tp_found |=true
@@ -448,28 +445,26 @@ module Reports
break if tp_found and fp_found
end
unless fp_found and tp_found #if perfect/worst add wrong/right instance with lowest confidence
- a << (tp_found ? 0 : 1)
- p << 1
+ tp << (tp_found ? 0 : 1)
c << -Float::MAX
end
- (0..p.size-2).each do |i|
- ((i+1)..p.size-1).each do |j|
+ (0..tp.size-2).each do |i|
+ ((i+1)..tp.size-1).each do |j|
if c[i]<c[j]
c.swap!(i,j)
- a.swap!(i,j)
- p.swap!(i,j)
+ tp.swap!(i,j)
end
end
end
- #puts c.inspect+"\n"+a.inspect+"\n"+p.inspect+"\n\n"
+ #puts c.inspect+"\n"+tp.inspect+"\n\n"
tp_rate = [0]
fp_rate = [0]
w = [1]
c2 = [Float::MAX]
- (0..p.size-1).each do |i|
- if a[i]==p[i]
+ (0..tp.size-1).each do |i|
+ if tp[i]==1
tp_rate << tp_rate[-1]+1
fp_rate << fp_rate[-1]
else
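
Taken together, the patched get_tp_fp_rates works on the single true_positives array: order predictions by confidence (swap! is presumably a small Array extension in this codebase), then walk the list accumulating hits and misses. Below is a condensed, self-contained sketch of that algorithm using sort_by in place of the hand-rolled ordering loop; it omits the perfect/worst-curve hack and the weight bookkeeping for tied confidences (w, c2), and the percent scaling at the end is an assumption, since the hunk is cut off before the rates are normalized:

# Illustrative reimplementation, not the library code.
def tp_fp_rates(true_positives, confidences)
  raise "no prediction values for roc-plot" if true_positives.empty?
  # order by confidence, highest first
  pairs = confidences.zip(true_positives).sort_by { |c, _tp| -c }
  tp_total = true_positives.select { |v| v == 1 }.size
  fp_total = true_positives.size - tp_total
  tp_rate, fp_rate = [0], [0]
  tp_seen, fp_seen = 0, 0
  pairs.each do |_c, tp|
    if tp == 1
      tp_seen += 1
    else
      fp_seen += 1
    end
    tp_rate << tp_seen
    fp_rate << fp_seen
  end
  # scale counts to percent of the respective totals (assumed step)
  tp_rate.map! { |v| tp_total > 0 ? v * 100.0 / tp_total : 0.0 }
  fp_rate.map! { |v| fp_total > 0 ? v * 100.0 / fp_total : 0.0 }
  { :tp_rate => tp_rate, :fp_rate => fp_rate }
end

tp_fp_rates([1, 1, 0, 1, 0], [0.9, 0.8, 0.7, 0.6, 0.5])
# => tp_rate [0, 33.3, 66.7, 66.7, 100, 100] (rounded),
#    fp_rate [0, 0, 0, 50, 50, 100]
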
diff --git a/report/report_content.rb b/report/report_content.rb
index 8c437a8..9c33038 100755
--- a/report/report_content.rb
+++ b/report/report_content.rb
@@ -179,13 +179,14 @@ class Reports::ReportContent
def add_roc_plot( validation_set,
accept_value,
split_set_attribute=nil,
- image_title = "ROC Plot",
+ image_title = nil,
section_text="")
#section_roc = @xml_report.add_section(@current_section, section_title)
section_roc = @current_section
prediction_set = validation_set.collect{ |v| v.get_predictions && v.get_predictions.confidence_values_available? }
-
+ image_title = "ROC Plot (true class is '"+accept_value.to_s+"')" unless image_title
+
if prediction_set.size>0
if prediction_set.size!=validation_set.size
section_text += "\nWARNING: roc plot information not available for all validation results"
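
With image_title now defaulting to nil, callers that pass no title get a caption naming the plotted class instead of the former generic "ROC Plot". For illustration (the class label 'active' is made up):

accept_value = "active"   # hypothetical accept value
"ROC Plot (true class is '" + accept_value.to_s + "')"
# => "ROC Plot (true class is 'active')"
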
diff --git a/report/report_factory.rb b/report/report_factory.rb
index 340f276..1cf7b94 100755
--- a/report/report_factory.rb
+++ b/report/report_factory.rb
@@ -5,19 +5,19 @@ VAL_ATTR_TRAIN_TEST = [ :model_uri, :training_dataset_uri, :test_dataset_uri, :p
VAL_ATTR_CV = [ :algorithm_uri, :dataset_uri, :num_folds, :crossvalidation_fold ]
# selected attributes of interest when performing classification
-VAL_ATTR_CLASS = [ :num_instances, :num_unpredicted, :accuracy, :weighted_accuracy, :weighted_area_under_roc,
+VAL_ATTR_CLASS = [ :num_instances, :num_unpredicted, :accuracy, :weighted_accuracy, :average_area_under_roc,
:area_under_roc, :f_measure, :true_positive_rate, :true_negative_rate ]
VAL_ATTR_REGR = [ :num_instances, :num_unpredicted, :root_mean_squared_error,
:weighted_root_mean_squared_error, :mean_absolute_error, :weighted_mean_absolute_error, :r_square, :weighted_r_square,
:sample_correlation_coefficient ]
-#VAL_ATTR_BAR_PLOT_CLASS = [ :accuracy, :weighted_area_under_roc,
+#VAL_ATTR_BAR_PLOT_CLASS = [ :accuracy, :average_area_under_roc,
# :area_under_roc, :f_measure, :true_positive_rate, :true_negative_rate ]
VAL_ATTR_BAR_PLOT_CLASS = [ :accuracy, :f_measure, :true_positive_rate, :true_negative_rate ]
VAL_ATTR_BAR_PLOT_REGR = [ :root_mean_squared_error, :mean_absolute_error, :r_square ]
VAL_ATTR_TTEST_REGR = [:r_square, :root_mean_squared_error]
-VAL_ATTR_TTEST_CLASS = [:percent_correct, :weighted_area_under_roc]
+VAL_ATTR_TTEST_CLASS = [:percent_correct, :average_area_under_roc]
# = Reports::ReportFactory
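
The rename from weighted_area_under_roc to average_area_under_roc runs through every attribute list in this commit; the new name suggests a plain, unweighted mean of the per-class AUC values. A hypothetical sketch of such a statistic (not code from this repository):

# Assumed semantics: mean of per-class AUCs with no class-size weighting,
# which is presumably why 'weighted_' was a misnomer worth fixing.
def average_area_under_roc(auc_per_class)
  return 0.0 if auc_per_class.empty?
  auc_per_class.values.inject(0.0) { |sum, auc| sum + auc } / auc_per_class.size
end

average_area_under_roc("active" => 0.82, "inactive" => 0.76)  # => 0.79
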
@@ -76,11 +76,13 @@ module Reports::ReportFactory
report.add_result(validation_set, [:validation_uri] + VAL_ATTR_TRAIN_TEST + VAL_ATTR_CLASS, "Results", "Results")
report.add_confusion_matrix(val)
report.add_section("Plots")
- ([nil] + validation_set.get_accept_values).each do |accept_value|
- report.add_roc_plot(validation_set, accept_value)
- report.add_confidence_plot(validation_set, accept_value)
- title = accept_value ? "Plots for predicted class-value '"+accept_value.to_s+"'" : "Plots for all predictions"
- report.align_last_two_images title
+ report.add_confidence_plot(validation_set)
+ if (validation_set.get_accept_values.size == 2)
+ report.add_roc_plot(validation_set, validation_set.get_accept_values[0])
+ else
+ validation_set.get_accept_values.each do |accept_value|
+ report.add_roc_plot(validation_set, accept_value)
+ end
end
report.end_section
when "regression"
@@ -127,12 +129,14 @@ module Reports::ReportFactory
report.add_confusion_matrix(cv_set.validations[0])
report.add_section("Plots")
[nil, :crossvalidation_fold].each do |split_attribute|
- ([nil] + validation_set.get_accept_values).each do |accept_value|
- report.add_roc_plot(validation_set, accept_value, split_attribute)
- report.add_confidence_plot(validation_set, accept_value, split_attribute)
- title = accept_value ? "Plots for predicted class-value '"+accept_value.to_s+"'" : "Plots for all predictions"
- title += split_attribute ? ", separated by crossvalidation fold" : " (accumulated over all folds)"
- report.align_last_two_images title
+
+ report.add_confidence_plot(validation_set,nil,split_attribute)
+ if (validation_set.get_accept_values.size == 2)
+ report.add_roc_plot(validation_set, validation_set.get_accept_values[0], split_attribute)
+ else
+ validation_set.get_accept_values.each do |accept_value|
+ report.add_roc_plot(validation_set, accept_value, split_attribute)
+ end
end
end
report.end_section
@@ -199,8 +203,8 @@ module Reports::ReportFactory
if (validation_set.num_different_values(:dataset_uri)>1)
all_merged = validation_set.merge([:algorithm_uri, :dataset_uri, :crossvalidation_id, :crossvalidation_uri])
report.add_ranking_plots(all_merged, :algorithm_uri, :dataset_uri,
- [:percent_correct, :weighted_area_under_roc, :true_positive_rate, :true_negative_rate] )
- report.add_result_overview(all_merged, :algorithm_uri, :dataset_uri, [:percent_correct, :weighted_area_under_roc, :true_positive_rate, :true_negative_rate])
+ [:percent_correct, :average_area_under_roc, :true_positive_rate, :true_negative_rate] )
+ report.add_result_overview(all_merged, :algorithm_uri, :dataset_uri, [:percent_correct, :average_area_under_roc, :true_positive_rate, :true_negative_rate])
end
result_attributes = [:identifier,:crossvalidation_uri,:crossvalidation_report_uri]+VAL_ATTR_CV-[:crossvalidation_fold,:num_folds,:dataset_uri]
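
The attribute lists are composed with plain Ruby array arithmetic; the subtraction above strips the per-fold attributes from the merged crossvalidation view. With VAL_ATTR_CV as defined at the top of this file:

VAL_ATTR_CV = [ :algorithm_uri, :dataset_uri, :num_folds, :crossvalidation_fold ]
VAL_ATTR_CV - [ :crossvalidation_fold, :num_folds, :dataset_uri ]
# => [:algorithm_uri]
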
diff --git a/report/validation_data.rb b/report/validation_data.rb
index aa146a6..b6522b6 100755
--- a/report/validation_data.rb
+++ b/report/validation_data.rb
@@ -1,9 +1,9 @@
# the variance is computed when merging results for these attributes
VAL_ATTR_VARIANCE = [ :area_under_roc, :percent_correct, :root_mean_squared_error, :mean_absolute_error,
- :r_square, :accuracy, :weighted_area_under_roc, :weighted_accuracy, :weighted_root_mean_squared_error, :weighted_mean_absolute_error,
+ :r_square, :accuracy, :average_area_under_roc, :weighted_accuracy, :weighted_root_mean_squared_error, :weighted_mean_absolute_error,
:weighted_r_square ]
-VAL_ATTR_RANKING = [ :area_under_roc, :percent_correct, :true_positive_rate, :true_negative_rate, :weighted_area_under_roc, :accuracy, :f_measure ]
+VAL_ATTR_RANKING = [ :area_under_roc, :percent_correct, :true_positive_rate, :true_negative_rate, :average_area_under_roc, :accuracy, :f_measure ]
ATTR_NICE_NAME = {}
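
Per the comment, attributes in VAL_ATTR_VARIANCE get a variance computed when results are merged, and average_area_under_roc now joins that list as well as the ranking list. A hypothetical sketch of such a merge statistic (the actual merge code is not part of this diff):

values = [0.81, 0.78, 0.84]   # e.g. :accuracy over three folds, made-up numbers
mean = values.inject(0.0) { |s, v| s + v } / values.size
variance = values.inject(0.0) { |s, v| s + (v - mean) ** 2 } / values.size
# => mean 0.81, variance 0.0006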