author     helma@in-silico.ch <helma@in-silico.ch>  2018-10-31 14:50:42 +0100
committer  helma@in-silico.ch <helma@in-silico.ch>  2018-10-31 14:50:42 +0100
commit     5b08a8c6d8e5567d253bec92d5bf5d18fd040cdc (patch)
tree       9cd4bf4a79ff09771e51bafbc828a088d975bf66 /test
parent     2d4ce39cb1b489e26b0d6d96026054566a4f77b9 (diff)
pubchem import for openrisknet
Diffstat (limited to 'test')
-rw-r--r--  test/use_cases.rb | 50
1 file changed, 33 insertions(+), 17 deletions(-)
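
The new test_tox21 case below exercises the PubChem import mentioned in the commit message. As a standalone illustration, the following sketch reruns the same calls outside MiniTest; it assumes the lazar gem is installed with a running MongoDB backend, and takes the AID, species, and endpoint strings directly from the test code in this diff.

require "lazar"  # assumed entry point for the lazar gem

# Import PubChem BioAssay AID 743122 (Tox21 AhR assay) as a training dataset.
training_dataset = Dataset.from_pubchem_aid 743122

# Keep a CSV copy of the imported data for inspection.
File.open("AID743122.csv", "w+") { |f| f.puts training_dataset.to_csv }

# Train and validate a lazar model on the imported bioactivity feature.
model_validation = Model::Validation.from_dataset(
  training_dataset: training_dataset,
  prediction_feature: training_dataset.bioactivity_features.first,
  species: "Human HG2L7.5c1 cell line",
  endpoint: "aryl hydrocarbon receptor (AhR) signaling pathway activation"
)
p model_validation.id
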
diff --git a/test/use_cases.rb b/test/use_cases.rb
index 4959f16..4e072d8 100644
--- a/test/use_cases.rb
+++ b/test/use_cases.rb
@@ -3,27 +3,43 @@ require_relative "setup.rb"
class UseCasesTest < MiniTest::Test
def test_PA
- kazius = Dataset.from_sdf_file "#{DATA_DIR}/cas_4337.sdf"
- hansen = Dataset.from_csv_file "#{DATA_DIR}/hansen.csv"
- efsa = Dataset.from_csv_file "#{DATA_DIR}/efsa.csv"
- datasets = [kazius,hansen,efsa]
- map = {"1" => "mutagen", "0" => "nonmutagen"}
- p "merging"
- training_dataset = Dataset.merge datasets: datasets, features: datasets.collect{|d| d.bioactivity_features.first}, value_maps: [nil,map,map], keep_original_features: false, remove_duplicates: true
- assert_equal 8281, training_dataset.compounds.size
- p training_dataset.features.size
- p training_dataset.id
- training_dataset = Dataset.find('5bd8ac8fca62695d767fca6b')
+ #kazius = Dataset.from_sdf_file "#{DATA_DIR}/cas_4337.sdf"
+ #hansen = Dataset.from_csv_file "#{DATA_DIR}/hansen.csv"
+ #efsa = Dataset.from_csv_file "#{DATA_DIR}/efsa.csv"
+ #datasets = [kazius,hansen,efsa]
+ #map = {"1" => "mutagen", "0" => "nonmutagen"}
+ #p "merging"
+ #training_dataset = Dataset.merge datasets: datasets, features: datasets.collect{|d| d.bioactivity_features.first}, value_maps: [nil,map,map], keep_original_features: false, remove_duplicates: true
+ #assert_equal 8281, training_dataset.compounds.size
+ #p training_dataset.features.size
+ #p training_dataset.id
+ #training_dataset = Dataset.find('5bd8ac8fca62695d767fca6b')
+ #training_dataset = Dataset.find('5bd8bbadca62695f69e7a33b')
+ #puts training_dataset.to_csv
p "create model_validation"
- model_validation = Model::Validation.from_dataset training_dataset: training_dataset, prediction_feature: training_dataset.merged_features.first, species: "Salmonella typhimurium", endpoint: "Mutagenicity"
- p model_validation.id
- p "predict"
- pa = Dataset.from_sdf_file "#{DATA_DIR}/PA.sdf"
- prediction_dataset = model_dataset.predict pa
- p prediction_dataset.id
+ #model_validation = Model::Validation.from_dataset training_dataset: training_dataset, prediction_feature: training_dataset.merged_features.first, species: "Salmonella typhimurium", endpoint: "Mutagenicity"
+ #p model_validation.id
+ #model_validation = Model::Validation.find '5bd8df47ca6269604590ab38'
+ #p "predict"
+ #pa = Dataset.from_sdf_file "#{DATA_DIR}/PA.sdf"
+ #prediction_dataset = model_validation.predict pa
+ #p prediction_dataset.id
+ prediction_dataset = Dataset.find('5bd98b88ca6269609aab79f4')
puts prediction_dataset.to_csv
end
+ def test_tox21
+ training_dataset = Dataset.from_pubchem_aid 743122
+ p training_dataset.id
+ #'5bd9a1dbca626969d97fb421'
+ File.open("AID743122.csv","w+"){|f| f.puts training_dataset.to_csv}
+ model = Model::Lazar.create training_dataset: training_dataset
+ p model.id
+ #p Model::Lazar.find('5bd9a70bca626969d97fc9df')
+ model_validation = Model::Validation.from_dataset training_dataset: training_dataset, prediction_feature: training_dataset.bioactivity_features.first, species: "Human HG2L7.5c1 cell line", endpoint: "aryl hydrocarbon receptor (AhR) signaling pathway activation"
+ p model_validation.id
+ end
+
def test_public_models
skip
=begin