From 2d4ce39cb1b489e26b0d6d96026054566a4f77b9 Mon Sep 17 00:00:00 2001
From: "helma@in-silico.ch" <helma@in-silico.ch>
Date: Tue, 30 Oct 2018 21:11:04 +0100
Subject: dataset merge

---
 lib/dataset.rb    | 45 ++++++++++++++++++++++++++++++++++++---------
 lib/feature.rb    |  8 +++++++-
 lib/model.rb      | 10 ++++++++++
 test/dataset.rb   | 31 ++++++-------------------------
 test/use_cases.rb | 17 ++++++++++++-----
 5 files changed, 71 insertions(+), 40 deletions(-)

diff --git a/lib/dataset.rb b/lib/dataset.rb
index 4543e42..46a83d7 100644
--- a/lib/dataset.rb
+++ b/lib/dataset.rb
@@ -94,6 +94,12 @@ module OpenTox
       features.select{|f| f._type.match("Prediction")}
     end
 
+    # Get nominal and numeric merged features
+    # @return [Array]
+    def merged_features
+      features.select{|f| f._type.match("Merged")}
+    end
+
     # Writers
 
     # Add a value for a given substance and feature
@@ -425,27 +431,48 @@ module OpenTox
       end
       chunks
     end
-=begin
+
     # Merge an array of datasets
     # @param [Array] datasets to be merged
    # @return [OpenTox::Dataset] merged dataset
-    def self.merge datasets: datasets, features: features, value_maps: value_maps, keep_original_features: keep_original_features, remove_duplicates: remove_duplicates
+    def self.merge datasets: , features: , value_maps: , keep_original_features: , remove_duplicates:
       dataset = self.create(:source => datasets.collect{|d| d.id.to_s}.join(", "), :name => datasets.collect{|d| d.name}.uniq.join(", ")+" merged")
-      datasets.each_with_index do |d,i|
+
+      datasets.each do |d|
         dataset.data_entries += d.data_entries
         dataset.warnings += d.warnings
-      end
+      end if keep_original_features
+
+      feature_classes = features.collect{|f| f.class}.uniq
+      merged_feature = nil
       if feature_classes.size == 1
-        if features.first.nominal?
-          merged_feature = MergedNominalBioActivity.find_or_create_by(:name => features.collect{|f| f.name} + " (merged)", :original_feature_id => feature.id, :transformation => map, :accept_values => map.values.sort)
-          compounds.each do |c|
-            values(c,feature).each { |v| dataset.add c, new_feature, map[v] }
+        if features.first.kind_of? NominalFeature
+          merged_feature = MergedNominalBioActivity.find_or_create_by(:name => features.collect{|f| f.name}.uniq.join(", ") + " merged", :original_feature_ids => features.collect{|f| f.id}, :transformations => value_maps)
+        else
+          merged_feature = MergedNumericBioActivity.find_or_create_by(:name => features.collect{|f| f.name}.uniq.join(", ") + " merged", :original_feature_ids => features.collect{|f| f.id}) # TODO, :transformations
+        end
+      else
+        bad_request_error "Cannot merge features of different types (#{feature_classes})."
       end
+
+      accept_values = []
+      features.each_with_index do |f,i|
+        dataset.data_entries += datasets[i].data_entries.select{|de| de[1] == f.id}.collect do |de|
+          value_maps[i] ? v = value_maps[i][de[2]] : v = de[2]
+          accept_values << v
+          [de[0],merged_feature.id,v]
+        end
+      end
+
+      if merged_feature.is_a? MergedNominalBioActivity
+        merged_feature.accept_values = accept_values.uniq.sort
+        merged_feature.save
+      end
+
+      dataset.data_entries.uniq! if remove_duplicates
       dataset.save
       dataset
     end
-=end
 
     # Change nominal feature values
     # @param [NominalFeature] Original feature
diff --git a/lib/feature.rb b/lib/feature.rb
index 50bbc42..b474398 100644
--- a/lib/feature.rb
+++ b/lib/feature.rb
@@ -34,7 +34,13 @@ module OpenTox
   end
 
   # Merged nominal biological activity
-  class MergedNominalBioActivity < NominalFeature
+  class MergedNominalBioActivity < NominalBioActivity
+    field :original_feature_ids, type: Array
+    field :transformations, type: Array
+  end
+
+  # Merged numeric biological activity
+  class MergedNumericBioActivity < NumericBioActivity
     field :original_feature_ids, type: Array
   end
 
diff --git a/lib/model.rb b/lib/model.rb
index 6d5cf7b..f50fcd7 100644
--- a/lib/model.rb
+++ b/lib/model.rb
@@ -487,6 +487,16 @@ module OpenTox
     end
 
     # TODO from_pubchem_aid
+    def self.from_dataset training_dataset: , prediction_feature: , species: , endpoint:
+      model_validation = Model::Validation.create species: species, endpoint: endpoint
+      p "create model"
+      model = Lazar.create training_dataset: training_dataset, prediction_feature: prediction_feature
+      model_validation[:model_id] = model.id
+      p "create_crossvalidations"
+      model_validation[:repeated_crossvalidation_id] = OpenTox::Validation::RepeatedCrossValidation.create(model).id # full class name required
+      model_validation.save
+      model_validation
+    end
 
     # Create and validate a lazar model from a csv file with training data and a json file with metadata
     # @param [File] CSV file with two columns. The first line should contain either SMILES or InChI (first column) and the endpoint (second column). The first column should contain either the SMILES or InChI of the training compounds, the second column the training compounds toxic activities (qualitative or quantitative). Use -log10 transformed values for regression datasets. Add metadata to a JSON file with the same basename containing the fields "species", "endpoint", "source" and "unit" (regression only). You can find example training data at https://github.com/opentox/lazar-public-data.
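
The core of the new Dataset.merge above is the per-feature loop that rewrites each dataset's activity labels through its value map before attaching them to the shared merged feature. The following standalone sketch (not part of the patch) illustrates that step with made-up substance and feature ids; data_entries are assumed to be [substance_id, feature_id, value] triples, as in the loop above:

    # Illustration of the value-map step in Dataset.merge (hypothetical ids and labels).
    features   = ["kazius_activity", "hansen_activity"]          # one bioactivity feature per source dataset
    value_maps = [nil, {"1" => "mutagen", "0" => "nonmutagen"}]  # nil = keep the original values

    data_entries = [
      ["c1", "kazius_activity", "mutagen"],   # already uses the target labels
      ["c2", "hansen_activity", "1"],
      ["c3", "hansen_activity", "0"]
    ]

    merged_entries = []
    features.each_with_index do |feature_id, i|
      data_entries.select{|de| de[1] == feature_id}.each do |de|
        v = value_maps[i] ? value_maps[i][de[2]] : de[2]
        merged_entries << [de[0], "merged_activity", v]
      end
    end
    # merged_entries:
    # [["c1", "merged_activity", "mutagen"],
    #  ["c2", "merged_activity", "mutagen"],
    #  ["c3", "merged_activity", "nonmutagen"]]
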
diff --git a/test/dataset.rb b/test/dataset.rb
index 8018dd2..70d26d2 100644
--- a/test/dataset.rb
+++ b/test/dataset.rb
@@ -190,37 +190,18 @@ class DatasetTest < MiniTest::Test
     assert_equal d.id.to_s, copy.source
   end
 
-  def test_map
-    skip
-    d = Dataset.from_csv_file("#{DATA_DIR}/hamster_carcinogenicity.csv")
-    assert_equal 1, d.bioactivity_features.size
-    map = {"true" => "carcinogen", "false" => "non-carcinogen"}
-    mapped = d.map(d.bioactivity_features.first, map)
-    c = d.compounds.sample
-    assert_equal d.values(c,d.bioactivity_features.first).collect{|v| map[v]}, mapped.values(c,mapped.transformed_bioactivity_features.first)
-    assert_equal d.values(c,d.original_id_features.first), mapped.values(c,mapped.original_id_features.first)
-    assert_equal d.bioactivity_features.first.name, mapped.bioactivity_features.first.name
-    assert_equal ["carcinogen","non-carcinogen"], mapped.transformed_bioactivity_features.first.accept_values
-  end
-
   def test_merge
-    skip
     kazius = Dataset.from_sdf_file "#{DATA_DIR}/cas_4337.sdf"
     hansen = Dataset.from_csv_file "#{DATA_DIR}/hansen.csv"
     efsa = Dataset.from_csv_file "#{DATA_DIR}/efsa.csv"
-    #p "mapping hansen"
-    #hansen_mapped = hansen.map hansen.bioactivity_features.first, {"1" => "mutagen", "0" => "nonmutagen"}
-    #p "mapping efsa"
-    #efsa_mapped = efsa.map efsa.bioactivity_features.first, {"1" => "mutagen", "0" => "nonmutagen"}
-    #datasets = [kazius,hansen_mapped,efsa_mapped]
     datasets = [kazius,hansen,efsa]
-    d = Dataset.merge datasets#, datasets.collect{|d| d.bioactivity_features}.flatten.uniq
-    assert_equal 8281, d.compounds.size
+    map = {"1" => "mutagen", "0" => "nonmutagen"}
+    dataset = Dataset.merge datasets: datasets, features: datasets.collect{|d| d.bioactivity_features.first}, value_maps: [nil,map,map], keep_original_features: true, remove_duplicates: false
+    assert_equal 8281, dataset.compounds.size
+    assert_equal 9, dataset.features.size
     c = Compound.from_smiles("C/C=C/C=O")
-    assert_equal ["mutagen"], d.values(c,d.bioactivity_features.first)
-    assert_equal datasets.collect{|d| d.id.to_s}.join(", "), d.source
-    assert_equal 8, d.features.size
-    File.open("tmp.csv","w+"){|f| f.puts d.to_csv}
+    assert_equal ["mutagen"], dataset.values(c,dataset.merged_features.first)
+    #File.open("tmp.csv","w+"){|f| f.puts d.to_csv}
   end
 
   # serialisation
diff --git a/test/use_cases.rb b/test/use_cases.rb
index 15e65a3..4959f16 100644
--- a/test/use_cases.rb
+++ b/test/use_cases.rb
@@ -3,18 +3,25 @@ require_relative "setup.rb"
 class UseCasesTest < MiniTest::Test
 
   def test_PA
-    skip
     kazius = Dataset.from_sdf_file "#{DATA_DIR}/cas_4337.sdf"
     hansen = Dataset.from_csv_file "#{DATA_DIR}/hansen.csv"
     efsa = Dataset.from_csv_file "#{DATA_DIR}/efsa.csv"
     datasets = [kazius,hansen,efsa]
-    map = {"true" => "carcinogen", "false" => "non-carcinogen"}
+    map = {"1" => "mutagen", "0" => "nonmutagen"}
+    p "merging"
     training_dataset = Dataset.merge datasets: datasets, features: datasets.collect{|d| d.bioactivity_features.first}, value_maps: [nil,map,map], keep_original_features: false, remove_duplicates: true
-    model = Model::Validation.create training_dataset: training_dataset, species: "Salmonella typhimurium", endpoint: "Mutagenicity"
+    assert_equal 8281, training_dataset.compounds.size
+    p training_dataset.features.size
+    p training_dataset.id
+    training_dataset = Dataset.find('5bd8ac8fca62695d767fca6b')
+    p "create model_validation"
+    model_validation = Model::Validation.from_dataset training_dataset: training_dataset, prediction_feature: training_dataset.merged_features.first, species: "Salmonella typhimurium", endpoint: "Mutagenicity"
species: "Salmonella typhimurium", endpoint: "Mutagenicity" + p model_validation.id + p "predict" pa = Dataset.from_sdf_file "#{DATA_DIR}/PA.sdf" - prediction_dataset = model.predict pa + prediction_dataset = model_dataset.predict pa + p prediction_dataset.id puts prediction_dataset.to_csv - assert_equal 8281, d.compounds.size end def test_public_models -- cgit v1.2.3