path: root/lib/dataset.rb
require 'matrix'

class Dataset

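  # Read a CSV training file together with the metadata files
  # "dependent_variable_type", "dependent_variable_values" (for binary data)
  # and "independent_variable_type" expected in the same directory.
  # The first header column is treated as an ID column if it matches /ID/i,
  # the last column as the dependent variable.
  # @param [String] file CSV file with a header row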
  def initialize file
    @dir = File.dirname file
    @dependent_variable_type = File.read(File.join(@dir,"dependent_variable_type")).chomp
    if @dependent_variable_type == "binary"
      @dependent_variable_values = {}
      File.readlines(File.join(@dir,"dependent_variable_values")).each_with_index{|v,i| @dependent_variable_values[v.chomp] = i}
    end
    @independent_variable_type = File.read(File.join(@dir,"independent_variable_type")).chomp
    @lines = File.readlines(file)
    @header = @lines.shift.split(",")
    @has_id = @header.first.match(/ID/i) ? true : false
    @dependent_variable_name = @header.pop
    @ids = []
    @dependent_variables = []
    @independent_variables = []
    @independent_variable_names = []
  end

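  # Write ids, dependent/independent variables and their names as plain
  # text files into the dataset directory.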
  def print_variables
    File.open(File.join(@dir,"ids"),"w+") { |f| f.puts @ids.join("\n") }
    File.open(File.join(@dir,"dependent_variable_name"),"w+") { |f| f.puts @dependent_variable_name }
    File.open(File.join(@dir,"dependent_variables"),"w+") { |f| f.puts @dependent_variables.join("\n") }
    File.open(File.join(@dir,"independent_variable_names"),"w+") { |f| f.puts @independent_variable_names.join(",") }
    File.open(File.join(@dir,"independent_variables"),"w+") { |f| @independent_variables.each{|row| f.puts row.join(",")} }
  end

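  # Parse numeric independent variables and scale each column to zero mean
  # and unit variance; column means and standard deviations are written to
  # the dataset directory. Note: the file argument is currently unused,
  # lines are read in the constructor.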
  def scale_independent_variables file
    @header.shift if @has_id
    @independent_variable_names = @header
    @lines.each_with_index do |line,i|
      items = line.chomp.split(",")
      @ids << (@has_id ? items.shift : i)
      if @dependent_variable_type == "binary"
        @dependent_variables << @dependent_variable_values[items.pop]
      elsif @dependent_variable_type == "numeric"
        @dependent_variables << items.pop.to_f
      end
      @independent_variables << items.collect{|item| item.to_f}
    end
    @independent_variables = Matrix[ *@independent_variables ]
    columns = @independent_variables.column_vectors
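    # mean and standard_deviation are assumed to be Array core extensions
    # defined elsewhere in this library; they are not part of Ruby's stdlib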
    @independent_variable_means = columns.collect{|c| c.to_a.mean}
    @independent_variable_standard_deviations = columns.collect{|c| c.to_a.standard_deviation}
    scaled_columns = []
    columns.each_with_index{|col,i| scaled_columns << col.collect{|v| v ? (v-@independent_variable_means[i])/@independent_variable_standard_deviations[i] : nil}}
    @independent_variables = Matrix.columns(scaled_columns).to_a
    print_variables
    File.open(File.join(@dir,"means"),"w+") { |f| f.puts @independent_variable_means.join(",") }
    File.open(File.join(@dir,"standard_deviations"),"w+") { |f| f.puts @independent_variable_standard_deviations.join(",") }
  end

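  # Parse SMILES independent variables and expand them into structural
  # fingerprints via Compound#fingerprint (default type "MP2D").
  # Note: the file argument is currently unused, lines are read in the constructor.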
  def fingerprint_independent_variables file, fingerprint_type="MP2D"
    fingerprints = []
    @lines.each_with_index do |line,i|
      items = line.chomp.split(",")
      @ids << (@has_id ? items.shift : i)
      if @dependent_variable_type == "binary"
        @dependent_variables << @dependent_variable_values[items.pop]
      elsif @dependent_variable_type == "numeric"
        @dependent_variables << items.pop.to_f
      end
      fingerprint = Compound.new(items[0]).fingerprint(fingerprint_type)
      fingerprints << fingerprint # collect fingerprints to build the feature names below
      @independent_variables << [items[0]] + fingerprint
    end
    @independent_variable_names = ["Canonical Smiles"] + fingerprints.flatten.sort.uniq
    print_variables
  end
end

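# Minimal usage sketch (file names are hypothetical; assumes the metadata
# files "dependent_variable_type", "dependent_variable_values" and
# "independent_variable_type" exist next to the CSV file):
#
#   dataset = Dataset.new "data/training.csv"
#   dataset.scale_independent_variables "data/training.csv"
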
=begin
    # Create a dataset from an SDF file 
    #   files with a single data field are read as BioActivities (i.e. dependent variable)
    #   files with multiple data fields are read as SubstanceProperties (i.e. independent variables)
    # @param [File] file
    # @return [OpenTox::Dataset]
    def self.from_sdf_file file
      md5 = Digest::MD5.hexdigest(File.read(file)) # use hash to identify identical files
      dataset = self.find_by(:md5 => md5)
      if dataset
        $logger.debug "Found #{file} in the database (id: #{dataset.id}, md5: #{dataset.md5}), skipping import."
      else
        $logger.debug "Parsing #{file}."

        dataset = self.new(:source => file, :name => File.basename(file,".*"), :md5 => md5)
        original_id = OriginalId.find_or_create_by(:dataset_id => dataset.id,:name => dataset.name+".ID")

        read_result = false
        sdf = ""
        feature_name = ""
        compound = nil
        features = {}
        table = [["ID","SMILES"]]

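        # SDF records are molfiles terminated by "$$$$"; data fields are
        # introduced by "> <field name>" lines, each followed by its value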
        File.readlines(file).each do |line|
          if line.match %r{\$\$\$\$}
            sdf << line
            id = sdf.split("\n").first.chomp
            compound = Compound.from_sdf sdf
            row = [id,compound.smiles]
            features.each do |f,v|
              table[0] << f unless table[0].include? f
              row[table[0].index(f)] = v
            end
            table << row
            sdf = ""
            features = {}
          elsif line.match(/^>\s+</)
            feature_name = line.match(/^>\s+<(.*)>/)[1]
            read_result = true
          else
            if read_result
              value = line.chomp
              features[feature_name] = value
              read_result = false
            else
              sdf << line
            end
          end
        end
        dataset.parse_table table
      end
      dataset
    end

    # Create a dataset from a PubChem Assay
    # @param [Integer] aid PubChem AssayID (AID)
    # @return [OpenTox::Dataset]
    def self.from_pubchem_aid aid
      # TODO get regression data
      aid_url = File.join PUBCHEM_URI, "assay/aid/#{aid}"
      assay_metadata = JSON.parse(RestClientWrapper.get(File.join aid_url,"description/JSON").to_s)["PC_AssayContainer"][0]["assay"]["descr"]
      name = assay_metadata["name"].gsub(/\s+/,"_")
      dataset = self.new(:source => aid_url, :name => name) 
      # Get assay data in chunks
      # Assay record retrieval is limited to 10000 SIDs
      # https://pubchemdocs.ncbi.nlm.nih.gov/pug-rest-tutorial$_Toc458584435
      list = JSON.parse(RestClientWrapper.get(File.join aid_url, "sids/JSON?list_return=listkey").to_s)["IdentifierList"]
      listkey = list["ListKey"]
      size = list["Size"]
      start = 0
      csv = []
      while start < size
        url = File.join aid_url, "CSV?sid=listkey&listkey=#{listkey}&listkey_start=#{start}&listkey_count=10000"
        csv += CSV.parse(RestClientWrapper.get(url).to_s).select{|r| r[0].match(/^\d/)} # discard header rows
        start += 10000
      end
      table = [["SID","SMILES",name]]
      csv.each_slice(100) do |slice| # get SMILES in chunks
        cids = slice.collect{|s| s[2]}
        pubchem_cids = []
        JSON.parse(RestClientWrapper.get(File.join(PUBCHEM_URI,"compound/cid/#{cids.join(",")}/property/CanonicalSMILES/JSON")).to_s)["PropertyTable"]["Properties"].each do |prop|
          i = cids.index(prop["CID"].to_s)
          value = slice[i][3]
          if value == "Active" or value == "Inactive"
            table << [slice[i][1].to_s,prop["CanonicalSMILES"],slice[i][3].to_s]
            pubchem_cids << prop["CID"].to_s
          else
            dataset.warnings << "Ignoring CID #{prop["CID"]} / SMILES #{prop["CanonicalSMILES"]}, because PubChem activity is #{value}."
          end
        end
        (cids-pubchem_cids).each { |cid| dataset.warnings << "Could not retrieve SMILES for CID #{cid}, all entries are ignored." }
      end
      dataset.parse_table table
      dataset
    end


    # Convert dataset to SDF format
    # @return [String] SDF string
    def to_sdf

      export_features = merged_features
      export_features = transformed_bioactivity_features if export_features.empty? 
      export_features = bioactivity_features if export_features.empty? 
      export_feature = export_features.first

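      # build one record per compound: the molfile title line is replaced by
      # the SMILES string, followed by the exported feature values as a data field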
      sdf = ""
      compounds.each do |compound|
        sdf_lines = compound.sdf.sub(/\$\$\$\$\n/,"").split("\n")
        sdf_lines[0] = compound.smiles
        sdf += sdf_lines.join("\n")
        sdf += "\n> <#{export_feature.name}>\n"
        sdf += values(compound,export_feature).uniq.join ","
        sdf += "\n"
        sdf += "\n$$$$\n"
      end
      sdf
    end


    # Merge an array of datasets 
    # @param [Array<OpenTox::Dataset>] datasets Datasets to be merged
    # @param [Array<OpenTox::Feature>] features Features to be merged (same size as datasets)
    # @param [Array<Hash>] value_maps Value transformations (use nil to keep original values, same size as datasets)
    # @param [Bool] keep_original_features Copy original features/values to the merged dataset
    # @param [Bool] remove_duplicates Delete duplicated values (assuming they come from the same experiment)
    # @return [OpenTox::Dataset] merged dataset
    def self.merge datasets: , features: , value_maps: , keep_original_features: , remove_duplicates: 
      dataset = self.create(:source => datasets.collect{|d| d.id.to_s}.join(", "), :name => datasets.collect{|d| d.name}.uniq.join(", ")+" merged")

      datasets.each do |d|
        dataset.data_entries += d.data_entries
        dataset.warnings += d.warnings
      end if keep_original_features

      feature_classes = features.collect{|f| f.class}.uniq
      merged_feature = nil
      if feature_classes.size == 1
        if features.first.kind_of? NominalFeature
          merged_feature = MergedNominalBioActivity.find_or_create_by(:name => features.collect{|f| f.name}.uniq.join(" and ") + " merged", :original_feature_ids => features.collect{|f| f.id}, :transformations => value_maps)
        else
          merged_feature = MergedNumericBioActivity.find_or_create_by(:name => features.collect{|f| f.name}.uniq.join(" and ") + " merged", :original_feature_ids => features.collect{|f| f.id}) # TODO: regression transformations 
        end
      else
        raise ArgumentError, "Cannot merge features of different types (#{feature_classes})."
      end

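      # collect data entries from each source dataset, mapping values with the
      # corresponding value_map (nil keeps original values) and assigning them
      # to the merged feature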
      accept_values = []
      features.each_with_index do |f,i|
        dataset.data_entries += datasets[i].data_entries.select{|de| de[1] == f.id}.collect do |de|
          v = value_maps[i] ? value_maps[i][de[2]] : de[2]
          accept_values << v
          [de[0],merged_feature.id,v]
        end
      end

      if merged_feature.is_a? MergedNominalBioActivity
        merged_feature.accept_values = accept_values.uniq.sort
        merged_feature.save
      end

      dataset.data_entries.uniq! if remove_duplicates
      dataset.save
      dataset
    end

=end