author    mguetlein <martin.guetlein@gmail.com>  2011-05-24 16:14:37 +0200
committer mguetlein <martin.guetlein@gmail.com>  2011-05-24 16:14:37 +0200
commit    1c2ed2e04d5cbd1842e646e6429bcfdc0b1372dd (patch)
tree      8a650344ad68343f43ad2968750ca4df7b670ace /validation
parent    2317a75d3e91a6a03992a8f19d6559323146a256 (diff)
rewrite: adjust to new prediction dataset format from lazar, read confidence from ambit algorithms
Diffstat (limited to 'validation')
-rwxr-xr-x  validation/validation_application.rb  10
-rwxr-xr-x  validation/validation_service.rb      22
2 files changed, 21 insertions, 11 deletions
diff --git a/validation/validation_application.rb b/validation/validation_application.rb
index d448c62..f64f74e 100755
--- a/validation/validation_application.rb
+++ b/validation/validation_application.rb
@@ -4,6 +4,7 @@
end
require 'lib/dataset_cache.rb'
+require 'lib/feature_util.rb'
require 'validation/validation_service.rb'
get '/crossvalidation/?' do
@@ -443,21 +444,22 @@ post '/validate_datasets' do
params[:validation_type] = "validate_datasets"
if params[:model_uri]
+ raise OpenTox::BadRequestError.new "please specify 'model_uri' or set either 'classification' or 'regression' flag" if params[:classification] or params[:regression]
v = Validation::Validation.create params
v.subjectid = @subjectid
v.compute_validation_stats_with_model(nil,false,task)
else
raise OpenTox::BadRequestError.new "please specify 'model_uri' or 'prediction_feature'" unless params[:prediction_feature]
- raise OpenTox::BadRequestError.new "please specify 'model_uri' or 'predicted_feature'" unless params[:predicted_feature]
+ raise OpenTox::BadRequestError.new "please specify 'model_uri' or 'predicted_variable'" unless params[:predicted_variable]
raise OpenTox::BadRequestError.new "please specify 'model_uri' or set either 'classification' or 'regression' flag" unless
params[:classification] or params[:regression]
-
- predicted_feature = params.delete("predicted_feature")
+ predicted_variable = params.delete("predicted_variable")
+ predicted_confidence = params.delete("predicted_confidence")
feature_type = "classification" if params.delete("classification")!=nil
feature_type = "regression" if params.delete("regression")!=nil
v = Validation::Validation.create params
v.subjectid = @subjectid
- v.compute_validation_stats(feature_type,predicted_feature,nil,nil,false,task)
+ v.compute_validation_stats(feature_type,predicted_variable,predicted_confidence,nil,nil,false,task)
end
v.validation_uri
end
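
The hunk above renames the 'predicted_feature' parameter of POST /validate_datasets to 'predicted_variable' and reads an additional 'predicted_confidence' parameter. A minimal client sketch, assuming the service listens on localhost:4007 (an assumption) and using placeholder dataset and feature URIs; only the parameter names are taken from the diff:

require 'net/http'
require 'uri'

# Hypothetical call against the changed route; every URI below is a placeholder.
uri = URI.parse("http://localhost:4007/validate_datasets")
params = {
  "test_dataset_uri"       => "http://example.org/dataset/1",
  "prediction_dataset_uri" => "http://example.org/dataset/2",
  "prediction_feature"     => "http://example.org/feature/endpoint",
  "predicted_variable"     => "http://example.org/feature/predicted_value",
  "predicted_confidence"   => "http://example.org/feature/prediction_confidence",
  "classification"         => "true"
}
response = Net::HTTP.post_form(uri, params)
puts response.body  # the service answers with a task or validation URI

Either 'classification' or 'regression' must be set when no 'model_uri' is given; 'predicted_confidence' appears to be optional, since only 'predicted_variable' is guarded by a BadRequestError.
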
diff --git a/validation/validation_service.rb b/validation/validation_service.rb
index 61f3a6e..73c15df 100755
--- a/validation/validation_service.rb
+++ b/validation/validation_service.rb
@@ -46,9 +46,15 @@ module Validation
test_target_dataset_uris = vals.collect{|v| v.test_target_dataset_uri}
prediction_feature = vals.first.prediction_feature
prediction_dataset_uris = vals.collect{|v| v.prediction_dataset_uri}
- predicted_variables = models.collect{|m| m.metadata[OT.predictedVariables]}
+ predicted_variables = []
+ predicted_confidences = []
+ models.size.times do |i|
+ predicted = Lib::FeatureUtil.predicted_variables(models[i], prediction_dataset_uris[i], subjectid)
+ predicted_variables << predicted[:predicted_variable]
+ predicted_confidences << predicted[:predicted_confidence]
+ end
prediction = Lib::OTPredictions.new( feature_type, test_dataset_uris, test_target_dataset_uris, prediction_feature,
- prediction_dataset_uris, predicted_variables, subjectid )
+ prediction_dataset_uris, predicted_variables, predicted_confidences, subjectid )
v = Validation.new
case feature_type
@@ -218,13 +224,15 @@ module Validation
dependentVariables = model.metadata[OT.dependentVariables]
prediction_feature = self.prediction_feature ? nil : dependentVariables
algorithm_uri = self.algorithm_uri ? nil : model.metadata[OT.algorithm]
- predictedVariables = model.metadata[OT.predictedVariables]
- compute_validation_stats( model.feature_type(self.subjectid), predictedVariables,
+ predicted_variables = Lib::FeatureUtil.predicted_variables(model, prediction_dataset_uri, subjectid)
+ predicted_variable = predicted_variables[:predicted_variable]
+ predicted_confidence = predicted_variables[:predicted_confidence]
+ compute_validation_stats( model.feature_type(self.subjectid), predicted_variable, predicted_confidence,
prediction_feature, algorithm_uri, dry_run, task )
end
- def compute_validation_stats( feature_type, predicted_feature, prediction_feature=nil,
- algorithm_uri=nil, dry_run=false, task=nil )
+ def compute_validation_stats( feature_type, predicted_variable, predicted_confidence, prediction_feature,
+ algorithm_uri, dry_run, task )
# self.attributes = { :prediction_feature => prediction_feature } if self.prediction_feature==nil && prediction_feature
# self.attributes = { :algorithm_uri => algorithm_uri } if self.algorithm_uri==nil && algorithm_uri
@@ -237,7 +245,7 @@ module Validation
LOGGER.debug "computing prediction stats"
prediction = Lib::OTPredictions.new( feature_type,
self.test_dataset_uri, self.test_target_dataset_uri, self.prediction_feature,
- self.prediction_dataset_uri, predicted_feature, self.subjectid, OpenTox::SubTask.create(task, 0, 80) )
+ self.prediction_dataset_uri, predicted_variable, predicted_confidence, self.subjectid, OpenTox::SubTask.create(task, 0, 80) )
#reading datasets and computing the main stats is 80% the work
unless dry_run
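
The validation_service.rb hunks delegate feature resolution to Lib::FeatureUtil.predicted_variables(model, prediction_dataset_uri, subjectid), which is expected to return a hash with the keys :predicted_variable and :predicted_confidence; both features are then threaded through OTPredictions and compute_validation_stats. lib/feature_util.rb itself is not part of this diff, so the following is only a sketch of that contract, using a hypothetical helper over plain feature-URI arrays instead of the real model/dataset lookup:

# Schematic stand-in for the contract used above; NOT the actual
# lib/feature_util.rb implementation, which resolves the features from the
# model metadata and the prediction dataset.
module Lib
  module FeatureUtil
    # feature_uris: the model's predicted variables (OT.predictedVariables),
    # e.g. a predicted-value feature plus a confidence feature
    def self.split_predicted_variables(feature_uris)
      confidence = feature_uris.find { |f| f =~ /confidence/i }
      value      = (feature_uris - [confidence]).first
      { :predicted_variable => value, :predicted_confidence => confidence }
    end
  end
end

# Example (placeholder URIs):
# Lib::FeatureUtil.split_predicted_variables(
#   ["http://example.org/feature/predicted_value",
#    "http://example.org/feature/prediction_confidence"])
# => {:predicted_variable=>"http://example.org/feature/predicted_value",
#     :predicted_confidence=>"http://example.org/feature/prediction_confidence"}

Keeping the confidence feature separate from the value feature is what lets the prediction code read confidences produced by the ambit algorithms, as the commit message states.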