summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authormguetlein <martin.guetlein@gmail.com>2011-12-07 16:26:41 +0100
committermguetlein <martin.guetlein@gmail.com>2011-12-07 16:26:41 +0100
commit7565aeb930c9b24a677b65b89d62cc2db6318cee (patch)
tree7a482f31abc0af41de782448fa070fbf1296beb8
parentcf60c03db2481d3816e63f058a7ed12d905ac833 (diff)
add computation of classification prediction probabilities
-rwxr-xr-xlib/ot_predictions.rb270
-rwxr-xr-xlib/predictions.rb37
-rwxr-xr-xlib/validation_db.rb1
-rwxr-xr-xvalidation/validation_application.rb25
-rwxr-xr-xvalidation/validation_service.rb30
5 files changed, 236 insertions, 127 deletions
diff --git a/lib/ot_predictions.rb b/lib/ot_predictions.rb
index 335fe84..cf0168e 100755
--- a/lib/ot_predictions.rb
+++ b/lib/ot_predictions.rb
@@ -8,155 +8,175 @@ module Lib
CHECK_VALUES = ENV['RACK_ENV'] =~ /debug|test/
def identifier(instance_index)
- return compound(instance_index)
+ compound(instance_index)
end
def compound(instance_index)
- return @compounds[instance_index]
+ @compounds[instance_index]
end
-
+
def initialize( feature_type, test_dataset_uris, test_target_dataset_uris,
- prediction_feature, prediction_dataset_uris, predicted_variables, predicted_confidences, subjectid=nil, task=nil)
+ prediction_feature, prediction_dataset_uris, predicted_variables, predicted_confidences,
+ subjectid=nil, task=nil )
+
+ test_dataset_uris = [test_dataset_uris] unless test_dataset_uris.is_a?(Array)
+ test_target_dataset_uris = [test_target_dataset_uris] unless test_target_dataset_uris.is_a?(Array)
+ prediction_dataset_uris = [prediction_dataset_uris] unless prediction_dataset_uris.is_a?(Array)
+ predicted_variables = [predicted_variables] unless predicted_variables.is_a?(Array)
+ predicted_confidences = [predicted_confidences] unless predicted_confidences.is_a?(Array)
+ LOGGER.debug "loading prediction -- test-dataset: "+test_dataset_uris.inspect
+ LOGGER.debug "loading prediction -- test-target-datset: "+test_target_dataset_uris.inspect
+ LOGGER.debug "loading prediction -- prediction-dataset: "+prediction_dataset_uris.inspect
+ LOGGER.debug "loading prediction -- predicted_variable: "+predicted_variables.inspect
+ LOGGER.debug "loading prediction -- predicted_confidence: "+predicted_confidences.inspect
+ LOGGER.debug "loading prediction -- prediction_feature: "+prediction_feature.to_s
+ raise "prediction_feature missing" unless prediction_feature
+
+ @compounds = []
+ all_predicted_values = []
+ all_actual_values = []
+ all_confidence_values = []
+ accept_values = nil
- test_dataset_uris = [test_dataset_uris] unless test_dataset_uris.is_a?(Array)
- test_target_dataset_uris = [test_target_dataset_uris] unless test_target_dataset_uris.is_a?(Array)
- prediction_dataset_uris = [prediction_dataset_uris] unless prediction_dataset_uris.is_a?(Array)
- predicted_variables = [predicted_variables] unless predicted_variables.is_a?(Array)
- predicted_confidences = [predicted_confidences] unless predicted_confidences.is_a?(Array)
- LOGGER.debug "loading prediction -- test-dataset: "+test_dataset_uris.inspect
- LOGGER.debug "loading prediction -- test-target-datset: "+test_target_dataset_uris.inspect
- LOGGER.debug "loading prediction -- prediction-dataset: "+prediction_dataset_uris.inspect
- LOGGER.debug "loading prediction -- predicted_variable: "+predicted_variables.inspect
- LOGGER.debug "loading prediction -- predicted_confidence: "+predicted_confidences.inspect
- LOGGER.debug "loading prediction -- prediction_feature: "+prediction_feature.to_s
- raise "prediction_feature missing" unless prediction_feature
+ if task
+ task_step = 100 / (test_dataset_uris.size*2 + 1)
+ task_status = 0
+ end
+
+ test_dataset_uris.size.times do |i|
- @compounds = []
- all_predicted_values = []
- all_actual_values = []
- all_confidence_values = []
- accept_values = nil
+ test_dataset_uri = test_dataset_uris[i]
+ test_target_dataset_uri = test_target_dataset_uris[i]
+ prediction_dataset_uri = prediction_dataset_uris[i]
+ predicted_variable = predicted_variables[i]
+ predicted_confidence = predicted_confidences[i]
- if task
- task_step = 100 / (test_dataset_uris.size*2 + 1)
- task_status = 0
+ predicted_variable=prediction_feature if predicted_variable==nil
+
+ test_dataset = Lib::DatasetCache.find test_dataset_uri,subjectid
+ raise "test dataset not found: '"+test_dataset_uri.to_s+"'" unless test_dataset
+
+ if test_target_dataset_uri == nil || test_target_dataset_uri.strip.size==0 || test_target_dataset_uri==test_dataset_uri
+ test_target_dataset_uri = test_dataset_uri
+ test_target_dataset = test_dataset
+ raise "prediction_feature not found in test_dataset, specify a test_target_dataset\n"+
+ "prediction_feature: '"+prediction_feature.to_s+"'\n"+
+ "test_dataset: '"+test_target_dataset_uri.to_s+"'\n"+
+ "available features are: "+test_target_dataset.features.inspect if test_target_dataset.features.keys.index(prediction_feature)==nil
+ else
+ test_target_dataset = Lib::DatasetCache.find test_target_dataset_uri,subjectid
+ raise "test target datset not found: '"+test_target_dataset_uri.to_s+"'" unless test_target_dataset
+ if CHECK_VALUES
+ test_dataset.compounds.each do |c|
+ raise "test compound not found on test class dataset "+c.to_s unless test_target_dataset.compounds.include?(c)
+ end
+ end
+ raise "prediction_feature not found in test_target_dataset\n"+
+ "prediction_feature: '"+prediction_feature.to_s+"'\n"+
+ "test_target_dataset: '"+test_target_dataset_uri.to_s+"'\n"+
+ "available features are: "+test_target_dataset.features.inspect if test_target_dataset.features.keys.index(prediction_feature)==nil
end
-
- test_dataset_uris.size.times do |i|
-
- test_dataset_uri = test_dataset_uris[i]
- test_target_dataset_uri = test_target_dataset_uris[i]
- prediction_dataset_uri = prediction_dataset_uris[i]
- predicted_variable = predicted_variables[i]
- predicted_confidence = predicted_confidences[i]
-
- predicted_variable=prediction_feature if predicted_variable==nil
- test_dataset = Lib::DatasetCache.find test_dataset_uri,subjectid
- raise "test dataset not found: '"+test_dataset_uri.to_s+"'" unless test_dataset
+ compounds = test_dataset.compounds
+ LOGGER.debug "test dataset size: "+compounds.size.to_s
+ raise "test dataset is empty "+test_dataset_uri.to_s unless compounds.size>0
- if test_target_dataset_uri == nil || test_target_dataset_uri.strip.size==0 || test_target_dataset_uri==test_dataset_uri
- test_target_dataset_uri = test_dataset_uri
- test_target_dataset = test_dataset
- raise "prediction_feature not found in test_dataset, specify a test_target_dataset\n"+
- "prediction_feature: '"+prediction_feature.to_s+"'\n"+
- "test_dataset: '"+test_target_dataset_uri.to_s+"'\n"+
- "available features are: "+test_target_dataset.features.inspect if test_target_dataset.features.keys.index(prediction_feature)==nil
+ if feature_type=="classification"
+ av = test_target_dataset.accept_values(prediction_feature)
+ raise "'"+OT.acceptValue.to_s+"' missing/invalid for feature '"+prediction_feature.to_s+"' in dataset '"+
+ test_target_dataset_uri.to_s+"', acceptValues are: '"+av.inspect+"'" if av==nil or av.length<2
+ if accept_values==nil
+ accept_values=av
else
- test_target_dataset = Lib::DatasetCache.find test_target_dataset_uri,subjectid
- raise "test target datset not found: '"+test_target_dataset_uri.to_s+"'" unless test_target_dataset
- if CHECK_VALUES
- test_dataset.compounds.each do |c|
- raise "test compound not found on test class dataset "+c.to_s unless test_target_dataset.compounds.include?(c)
- end
- end
- raise "prediction_feature not found in test_target_dataset\n"+
- "prediction_feature: '"+prediction_feature.to_s+"'\n"+
- "test_target_dataset: '"+test_target_dataset_uri.to_s+"'\n"+
- "available features are: "+test_target_dataset.features.inspect if test_target_dataset.features.keys.index(prediction_feature)==nil
+ raise "accept values (in folds) differ "+av.inspect+" != "+accept_values.inspect if av!=accept_values
end
-
- compounds = test_dataset.compounds
- LOGGER.debug "test dataset size: "+compounds.size.to_s
- raise "test dataset is empty "+test_dataset_uri.to_s unless compounds.size>0
-
- if feature_type=="classification"
- av = test_target_dataset.accept_values(prediction_feature)
- raise "'"+OT.acceptValue.to_s+"' missing/invalid for feature '"+prediction_feature.to_s+"' in dataset '"+
- test_target_dataset_uri.to_s+"', acceptValues are: '"+av.inspect+"'" if av==nil or av.length<2
- if accept_values==nil
- accept_values=av
- else
- raise "accept values (in folds) differ "+av.inspect+" != "+accept_values.inspect if av!=accept_values
- end
+ end
+
+ actual_values = []
+ compounds.each do |c|
+ case feature_type
+ when "classification"
+ actual_values << classification_val(test_target_dataset, c, prediction_feature, accept_values)
+ when "regression"
+ actual_values << regression_val(test_target_dataset, c, prediction_feature)
end
-
- actual_values = []
- compounds.each do |c|
+ end
+ task.progress( task_status += task_step ) if task # loaded actual values
+
+ prediction_dataset = Lib::DatasetCache.find prediction_dataset_uri,subjectid
+ raise "prediction dataset not found: '"+prediction_dataset_uri.to_s+"'" unless prediction_dataset
+
+ # allow missing prediction feature if there are no compounds in the prediction dataset
+ raise "predicted_variable not found in prediction_dataset\n"+
+ "predicted_variable '"+predicted_variable.to_s+"'\n"+
+ "prediction_dataset: '"+prediction_dataset_uri.to_s+"'\n"+
+ "available features are: "+prediction_dataset.features.inspect if prediction_dataset.features.keys.index(predicted_variable)==nil and prediction_dataset.compounds.size>0
+ raise "predicted_confidence not found in prediction_dataset\n"+
+ "predicted_confidence '"+predicted_confidence.to_s+"'\n"+
+ "prediction_dataset: '"+prediction_dataset_uri.to_s+"'\n"+
+ "available features are: "+prediction_dataset.features.inspect if predicted_confidence and prediction_dataset.features.keys.index(predicted_confidence)==nil and prediction_dataset.compounds.size>0
+
+ raise "more predicted than test compounds, #test: "+compounds.size.to_s+" < #prediction: "+
+ prediction_dataset.compounds.size.to_s+", test-dataset: "+test_dataset_uri.to_s+", prediction-dataset: "+
+ prediction_dataset_uri if compounds.size < prediction_dataset.compounds.size
+ if CHECK_VALUES
+ prediction_dataset.compounds.each do |c|
+ raise "predicted compound not found in test dataset:\n"+c+"\ntest-compounds:\n"+
+ compounds.collect{|c| c.to_s}.join("\n") if compounds.index(c)==nil
+ end
+ end
+
+ predicted_values = []
+ confidence_values = []
+ count = 0
+ compounds.each do |c|
+ if prediction_dataset.compounds.index(c)==nil
+ predicted_values << nil
+ confidence_values << nil
+ else
case feature_type
when "classification"
- actual_values << classification_val(test_target_dataset, c, prediction_feature, accept_values)
+ predicted_values << classification_val(prediction_dataset, c, predicted_variable, accept_values)
when "regression"
- actual_values << regression_val(test_target_dataset, c, prediction_feature)
+ predicted_values << regression_val(prediction_dataset, c, predicted_variable)
end
- end
- task.progress( task_status += task_step ) if task # loaded actual values
-
- prediction_dataset = Lib::DatasetCache.find prediction_dataset_uri,subjectid
- raise "prediction dataset not found: '"+prediction_dataset_uri.to_s+"'" unless prediction_dataset
-
- # allow missing prediction feature if there are no compounds in the prediction dataset
- raise "predicted_variable not found in prediction_dataset\n"+
- "predicted_variable '"+predicted_variable.to_s+"'\n"+
- "prediction_dataset: '"+prediction_dataset_uri.to_s+"'\n"+
- "available features are: "+prediction_dataset.features.inspect if prediction_dataset.features.keys.index(predicted_variable)==nil and prediction_dataset.compounds.size>0
- raise "predicted_confidence not found in prediction_dataset\n"+
- "predicted_confidence '"+predicted_confidence.to_s+"'\n"+
- "prediction_dataset: '"+prediction_dataset_uri.to_s+"'\n"+
- "available features are: "+prediction_dataset.features.inspect if predicted_confidence and prediction_dataset.features.keys.index(predicted_confidence)==nil and prediction_dataset.compounds.size>0
-
- raise "more predicted than test compounds, #test: "+compounds.size.to_s+" < #prediction: "+
- prediction_dataset.compounds.size.to_s+", test-dataset: "+test_dataset_uri.to_s+", prediction-dataset: "+
- prediction_dataset_uri if compounds.size < prediction_dataset.compounds.size
- if CHECK_VALUES
- prediction_dataset.compounds.each do |c|
- raise "predicted compound not found in test dataset:\n"+c+"\ntest-compounds:\n"+
- compounds.collect{|c| c.to_s}.join("\n") if compounds.index(c)==nil
- end
- end
-
- predicted_values = []
- confidence_values = []
- count = 0
- compounds.each do |c|
- if prediction_dataset.compounds.index(c)==nil
- predicted_values << nil
- confidence_values << nil
+ if predicted_confidence
+ confidence_values << confidence_val(prediction_dataset, c, predicted_confidence)
else
- case feature_type
- when "classification"
- predicted_values << classification_val(prediction_dataset, c, predicted_variable, accept_values)
- when "regression"
- predicted_values << regression_val(prediction_dataset, c, predicted_variable)
- end
- if predicted_confidence
- confidence_values << confidence_val(prediction_dataset, c, predicted_confidence)
- else
- confidence_values << nil
- end
+ confidence_values << nil
end
- count += 1
end
- @compounds += compounds
- all_predicted_values += predicted_values
- all_actual_values += actual_values
- all_confidence_values += confidence_values
-
- task.progress( task_status += task_step ) if task # loaded predicted values and confidence
+ count += 1
end
+ @compounds += compounds
+ all_predicted_values += predicted_values
+ all_actual_values += actual_values
+ all_confidence_values += confidence_values
+ task.progress( task_status += task_step ) if task # loaded predicted values and confidence
+ end
+
+ #sort according to confidence if available
+ if all_confidence_values.compact.size>0
+ values = []
+ all_predicted_values.size.times do |i|
+ values << [all_predicted_values[i], all_actual_values[i], all_confidence_values[i], @compounds[i]]
+ end
+ values = values.sort_by{ |v| v[2] || 0 }.reverse # sorting by confidence
+ all_predicted_values = []
+ all_actual_values = []
+ all_confidence_values = []
+ @compounds = []
+ values.each do |v|
+ all_predicted_values << v[0]
+ all_actual_values << v[1]
+ all_confidence_values << v[2]
+ @compounds << v[3]
+ end
+ end
+
super(all_predicted_values, all_actual_values, all_confidence_values, feature_type, accept_values)
- raise "illegal num compounds "+num_info if @compounds.size != @predicted_values.size
+ raise "illegal num compounds "+num_info if @compounds.size != @predicted_values.size
task.progress(100) if task # done with the mathematics
end
diff --git a/lib/predictions.rb b/lib/predictions.rb
index 56bdd22..bd32efb 100755
--- a/lib/predictions.rb
+++ b/lib/predictions.rb
@@ -18,7 +18,29 @@ module Lib
def identifier(instance_index)
return instance_index.to_s
end
-
+
+ def data
+ { :predicted_values => @predicted_values, :actual_values => @actual_values, :confidence_values => @confidence_values,
+ :feature_type => @feature_type, :accept_values => @accept_values }
+ end
+
+ def self.from_data( data, min_confidence=nil, prediction_index=nil )
+ if min_confidence!=nil
+ valid_indices = []
+ data[:confidence_values].size.times do |i|
+ valid_indices << i if prediction_index==data[:predicted_values][i] and
+ (valid_indices.size<=12 or data[:confidence_values][i]>=min_confidence)
+ end
+ [ :predicted_values, :actual_values, :confidence_values ].each do |key|
+ arr = []
+ valid_indices.each{|i| arr << data[key][i]}
+ data[key] = arr
+ end
+ end
+ Predictions.new( data[:predicted_values], data[:actual_values], data[:confidence_values],
+ data[:feature_type], data[:accept_values] )
+ end
+
def initialize( predicted_values,
actual_values,
confidence_values,
@@ -280,6 +302,15 @@ module Lib
return res
end
+ # returns actual values for a certain prediction
+ def confusion_matrix_row(predicted_class_index)
+ r = []
+ (0..@num_classes-1).each do |actual|
+ r << @confusion_matrix[actual][predicted_class_index]
+ end
+ return r
+ end
+
def area_under_roc(class_index=nil)
return prediction_feature_value_map( lambda{ |i| area_under_roc(i) } ) if
class_index==nil
@@ -742,6 +773,10 @@ module Lib
@conf_provided
end
+ def min_confidence
+ @confidence_values[-1]
+ end
+
###################################################################################################################
#def compound(instance_index)
diff --git a/lib/validation_db.rb b/lib/validation_db.rb
index be004fb..f770dc2 100755
--- a/lib/validation_db.rb
+++ b/lib/validation_db.rb
@@ -72,6 +72,7 @@ module Validation
attribute :classification_statistics_yaml
attribute :regression_statistics_yaml
attribute :finished
+ attribute :prediction_data
index :model_uri
index :validation_type
diff --git a/validation/validation_application.rb b/validation/validation_application.rb
index 4b2a2d9..0647b10 100755
--- a/validation/validation_application.rb
+++ b/validation/validation_application.rb
@@ -190,6 +190,15 @@ get '/crossvalidation/:id/statistics' do
end
end
+get '/crossvalidation/:id/statistics/probabilities' do
+
+ LOGGER.info "get crossvalidation statistics for crossvalidation with id "+params[:id].to_s
+ v = Validation::Validation.from_cv_statistics( params[:id], @subjectid )
+ props = v.probabilities(params[:confidence].to_s.to_f,params[:prediction].to_s)
+ content_type "text/x-yaml"
+ props.to_yaml
+end
+
delete '/crossvalidation/:id/?' do
LOGGER.info "delete crossvalidation with id "+params[:id].to_s
content_type "text/plain"
@@ -570,6 +579,22 @@ post '/validate_datasets' do
return_task(task)
end
+get '/:id/probabilities' do
+ LOGGER.info "get validation probabilities "+params.inspect
+
+ begin
+ validation = Validation::Validation.get(params[:id])
+ rescue ActiveRecord::RecordNotFound => ex
+ raise OpenTox::NotFoundError.new "Validation '#{params[:id]}' not found."
+ end
+ validation.subjectid = @subjectid
+ raise OpenTox::BadRequestError.new "Validation '"+params[:id].to_s+"' not finished" unless validation.finished
+ props = validation.probabilities(params[:confidence].to_s.to_f,params[:prediction].to_s)
+ content_type "text/x-yaml"
+ props.to_yaml
+end
+
+
get '/:id/predictions' do
LOGGER.info "get validation predictions "+params.inspect
begin
diff --git a/validation/validation_service.rb b/validation/validation_service.rb
index 8dc90e2..2b8a18f 100755
--- a/validation/validation_service.rb
+++ b/validation/validation_service.rb
@@ -72,6 +72,7 @@ module Validation
v.crossvalidation_id = crossvalidation.id
v.crossvalidation_fold = vals.collect{ |vv| vv.crossvalidation_fold }.uniq.join(";")
v.real_runtime = vals.collect{ |vv| vv.real_runtime }.uniq.join(";")
+ v.prediction_data = prediction.data.to_yaml
v.save
end
waiting_task.progress(100) if waiting_task
@@ -236,7 +237,8 @@ module Validation
LOGGER.debug "computing prediction stats"
prediction = Lib::OTPredictions.new( feature_type,
self.test_dataset_uri, self.test_target_dataset_uri, self.prediction_feature,
- self.prediction_dataset_uri, predicted_variable, predicted_confidence, self.subjectid, OpenTox::SubTask.create(task, 0, 80) )
+ self.prediction_dataset_uri, predicted_variable, predicted_confidence, self.subjectid,
+ OpenTox::SubTask.create(task, 0, 80) )
#reading datasets and computing the main stats is 80% the work
unless dry_run
@@ -261,6 +263,7 @@ module Validation
:percent_without_class => prediction.percent_without_class,
:num_unpredicted => prediction.num_unpredicted,
:percent_unpredicted => prediction.percent_unpredicted,
+ :prediction_data => prediction.data.to_yaml,
:finished => true
raise unless self.valid?
end
@@ -268,6 +271,31 @@ module Validation
task.progress(100) if task
prediction
end
+
+
+ def probabilities( confidence, prediction )
+ raise OpenTox::BadRequestError.new "Only supported for classification" if classification_statistics==nil
+ raise OpenTox::BadRequestError.new("illegal confidence value #{confidence}") if !confidence.is_a?(Numeric) or confidence<0 or confidence>1
+
+ p_data = YAML.load(self.prediction_data.to_s)
+ raise OpenTox::BadRequestError.new("probabilities method works only for new validations - prediction data missing") unless p_data
+ raise OpenTox::BadRequestError.new("illegal prediction value: '"+prediction+"', available: "+
+ p_data[:accept_values].inspect) if p_data[:accept_values].index(prediction)==nil
+
+ p = Lib::Predictions.from_data(p_data, confidence, p_data[:accept_values].index(prediction))
+ raise OpenTox::BadRequestError("no confidence values available") unless p.confidence_values_available?
+
+ prediction_counts = p.confusion_matrix_row( p_data[:accept_values].index(prediction) )
+ sum = 0
+ prediction_counts.each{|v| sum+=v}
+
+ probs = {}
+ p_data[:accept_values].size.times do |i|
+ probs[p_data[:accept_values][i]] = prediction_counts[i]/sum.to_f
+ end
+ probs
+ {:probs => probs, :num_predictions => sum, :min_confidence => p.min_confidence}
+ end
end
class Crossvalidation