author    mguetlein <martin.guetlein@gmail.com>    2012-04-02 16:13:29 +0200
committer mguetlein <martin.guetlein@gmail.com>    2012-04-02 16:13:29 +0200
commit    d809fed6b67cf3d9c66b7d23de9392de9801c3b0 (patch)
tree      a4bdd3470158397eb47cad3f41c943efa4410032
parent    3973b960ae2aa6bed540309e275f17e8fddc4567 (diff)
add metadata when splitting, add non-split-features to split-result-dataset, add k-fold-split method for crossvalidation, add sammon plotting
-rw-r--r--  lib/r-util.rb         89
-rw-r--r--  lib/stratification.R  78
2 files changed, 137 insertions(+), 30 deletions(-)
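
For orientation, the two R entry points this commit touches can be exercised standalone. A minimal sketch, assuming the "sampling" package is installed and using R's built-in iris table in place of a real feature dataframe (path and data are illustrative, not part of the commit):

    source("lib/stratification.R")
    # 70/30 stratified split: returns a 0/1 vector, 1 marking the sampled rows
    split <- stratified_split(iris[,1:4], ratio=0.3, method="samplecube")
    # new in this commit: k-fold variant returning fold numbers 1..num_folds
    folds <- stratified_k_fold_split(iris[,1:4], num_folds=5, method="samplecube")
    table(folds)  # fold sizes should come out near-equal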
diff --git a/lib/r-util.rb b/lib/r-util.rb
index 04f96f4..0d4e82c 100644
--- a/lib/r-util.rb
+++ b/lib/r-util.rb
@@ -176,22 +176,68 @@ module OpenTox
end
end
- # stratified splits a dataset into two dataset the feature values
+ # stratified splits a dataset into two datasets according to the feature values
# all features are taken into account unless <split_features> is given
- def stratified_split( dataset, missing_values="NA", pct=0.3, subjectid=nil, seed=42, split_features=nil )
+ # returns two datasets
+ def stratified_split( dataset, metadata={}, missing_values="NA", pct=0.3, subjectid=nil, seed=42, split_features=nil )
+ stratified_split_internal( dataset, metadata, missing_values, nil, pct, subjectid, seed, split_features )
+ end
+
+ # stratified splits a dataset into k datasets according to the feature values
+ # all features are taken into account unless <split_features> is given
+ # returns two arrays of datasets
+ def stratified_k_fold_split( dataset, metadata={}, missing_values="NA", num_folds=10, subjectid=nil, seed=42, split_features=nil )
+ stratified_split_internal( dataset, metadata, missing_values, num_folds, nil, subjectid, seed, split_features )
+ end
+
+ private
+ def stratified_split_internal( dataset, metadata={}, missing_values="NA", num_folds=nil, pct=nil, subjectid=nil, seed=42, split_features=nil )
+ raise "internal error" if num_folds!=nil and pct!=nil
+ k_fold_split = num_folds!=nil
+ if k_fold_split
+ raise "num_folds not a fixnum: #{num_folds}" unless num_folds.is_a?(Fixnum)
+ else
+ raise "pct is not a numeric: #{pct}" unless pct.is_a?(Numeric)
+ end
raise "not a loaded ot-dataset" unless dataset.is_a?(OpenTox::Dataset) and dataset.compounds.size>0 and dataset.features.size>0
raise "missing_values=#{missing_values}" unless missing_values.is_a?(String) or missing_values==0
- raise "pct=#{pct}" unless pct.is_a?(Numeric)
raise "subjectid=#{subjectid}" unless subjectid==nil or subjectid.is_a?(String)
LOGGER.debug("r-util> apply stratified split to #{dataset.uri}")
- df = dataset_to_dataframe( dataset, missing_values, subjectid, split_features )
+ df = dataset_to_dataframe( dataset, missing_values, subjectid)
@r.eval "set.seed(#{seed})"
- @r.eval "split <- stratified_split(#{df}, ratio=#{pct})"
- split = @r.pull 'split'
- split = split.collect{|s| 1-s.to_i} # reverse 1s and 0s, as 1 means selected, but 0 will be first set
- split_to_datasets( df, split, subjectid )
+ str_split_features = ""
+ if split_features
+ @r.split_features = split_features if split_features
+ str_split_features = "colnames=split_features"
+ end
+ @r.eval "save.image(\"/tmp/image.R\")"
+
+ if k_fold_split
+ @r.eval "split <- stratified_k_fold_split(#{df}, num_folds=#{num_folds}, #{str_split_features})"
+ split = @r.pull 'split'
+ train = []
+ test = []
+ num_folds.times do |f|
+ datasetname = 'dataset fold '+(f+1).to_s+' of '+num_folds.to_s
+ metadata[DC.title] = "training "+datasetname
+ train << split_to_dataset( df, split, metadata, subjectid ){ |i| i!=(f+1) }
+ metadata[DC.title] = "test "+datasetname
+ test << split_to_dataset( df, split, metadata, subjectid ){ |i| i==(f+1) }
+ end
+ return train, test
+ else
+ puts "split <- stratified_split(#{df}, ratio=#{pct}, #{str_split_features})"
+ @r.eval "split <- stratified_split(#{df}, ratio=#{pct}, #{str_split_features})"
+ split = @r.pull 'split'
+ metadata[DC.title] = "Training dataset split of "+dataset.uri
+ train = split_to_dataset( df, split, metadata, subjectid ){ |i| i==1 }
+ metadata[DC.title] = "Test dataset split of "+dataset.uri
+ test = split_to_dataset( df, split, metadata, subjectid ){ |i| i==0 }
+ return train, test
+ end
end
+ public
# dataset should be loaded completely (use Dataset.find)
# takes duplicates into account
@@ -277,17 +323,18 @@ module OpenTox
# converts a dataframe into a dataset (a new dataset is created at the dataset webservice)
# this is only possible if a superset of the dataframe was created by dataset_to_dataframe (metadata and URIs!)
- def dataframe_to_dataset( df, subjectid=nil )
- dataframe_to_dataset_indices( df, subjectid, nil)
+ def dataframe_to_dataset( df, metadata={}, subjectid=nil )
+ dataframe_to_dataset_indices( df, metadata, subjectid, nil)
end
private
- def dataframe_to_dataset_indices( df, subjectid=nil, compound_indices=nil )
+ def dataframe_to_dataset_indices( df, metadata={}, subjectid=nil, compound_indices=nil )
raise unless @@feats[df].size>0
values, compound_names, features = pull_dataframe(df)
compounds = compound_names.collect{|c| c.split("$")[0]}
features.each{|f| raise unless @@feats[df][f]}
dataset = OpenTox::Dataset.create(CONFIG[:services]["opentox-dataset"],subjectid)
+ dataset.add_metadata(metadata)
LOGGER.debug "r-util> convert dataframe to dataset #{dataset.uri}"
compounds.size.times{|i| dataset.add_compound(compounds[i]) if compound_indices==nil or compound_indices.include?(i)}
features.each{|f| dataset.add_feature(f,@@feats[df][f])}
@@ -304,16 +351,12 @@ module OpenTox
dataset
end
- def split_to_datasets( df, split, subjectid=nil )
- sets = []
- (split.min.to_i .. split.max.to_i).each do |i|
- indices = []
- split.size.times{|j| indices<<j if split[j]==i}
- dataset = dataframe_to_dataset_indices( df, subjectid, indices )
- LOGGER.debug("r-util> split into #{dataset.uri}, c:#{dataset.compounds.size}, f:#{dataset.features.size}")
- sets << dataset
- end
- sets
+ def split_to_dataset( df, split, metadata={}, subjectid=nil )
+ indices = []
+ split.size.times{|i| indices<<i if yield(split[i]) }
+ dataset = dataframe_to_dataset_indices( df, metadata, subjectid, indices )
+ LOGGER.debug("r-util> split into #{dataset.uri}, c:#{dataset.compounds.size}, f:#{dataset.features.size}")
+ dataset
end
def pull_dataframe(df)
@@ -337,8 +380,8 @@ module OpenTox
end
def assign_dataframe(df,input,rownames,colnames)
- rownames.check_uniq
- colnames.check_uniq
+ rownames.check_uniq if rownames
+ colnames.check_uniq if colnames
tmp = File.join(Dir.tmpdir,Time.new.to_f.to_s+"_"+rand(10000).to_s+".csv")
file = File.new(tmp, 'w')
input.each{|i| file.puts(i.collect{|e| "\"#{e}\""}.join("#")+"\n")}
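
On the Ruby side above, fold membership is driven entirely by the fold-number vector pulled from R: rows whose fold number equals the fold index form that test set, all remaining rows the matching training set (the { |i| i!=(f+1) } blocks). The same selection expressed directly in R, as a hedged sketch where data and num_folds are placeholders:

    folds <- stratified_k_fold_split(data, num_folds=num_folds)
    for (f in 1:num_folds) {
      train <- data[folds != f, ]  # Ruby block: { |i| i != (f+1) }
      test  <- data[folds == f, ]  # Ruby block: { |i| i == (f+1) }
    }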
diff --git a/lib/stratification.R b/lib/stratification.R
index 76ff2d8..3f8698c 100644
--- a/lib/stratification.R
+++ b/lib/stratification.R
@@ -1,4 +1,13 @@
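+# note: R's built-in round() does banker's rounding (round(0.5) == 0,
+# round(1.5) == 2), so it cannot guarantee the exact sample count
+# needed below; round_it always rounds .5 upwards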
+round_it <- function( x )
+{
+ if(isTRUE((x - floor(x))>=0.5))
+ ceiling(x)
+ else
+ floor(x)
+}
+
+
nominal_to_binary <- function( data )
{
result = NULL
@@ -41,9 +50,13 @@ nominal_to_binary <- function( data )
result
}
-process_data <- function( data )
+process_data <- function( data, colnames=NULL )
{
data.num <- as.data.frame(data)
+ if (!is.null(colnames))
+ {
+ data.num = subset(data.num, select = colnames)
+ }
if (!is.numeric(data.num))
{
data.num = nominal_to_binary(data.num)
@@ -72,14 +85,15 @@ cluster <- function( data, min=10, max=15 )
cbind(s$partition[,m])
}
-stratified_split <- function( data, ratio=0.3, method="cluster" )
+stratified_split <- function( data, ratio=0.3, method="cluster", colnames=NULL )
{
- data.processed = as.matrix(process_data( data ))
+ data.processed = as.matrix(process_data( data, colnames ))
+ print(paste("split using #features: ",ncol(data.processed)))
if (method == "samplecube")
{
require("sampling")
# adjust ratio to make samplecube return exact number of samples
- ratio = round(nrow(data.processed)*ratio)/nrow(data.processed)
+ ratio = round_it(nrow(data.processed)*ratio)/nrow(data.processed)
pik = rep(ratio,times=nrow(data.processed))
data.strat = cbind(pik,data.processed)
samplecube(data.strat,pik,order=2,comment=F)
@@ -101,10 +115,11 @@ stratified_split <- function( data, ratio=0.3, method="cluster" )
stop("unknown method")
}
-stratified_k_fold_split <- function( data, num_folds=10, method="cluster" )
+stratified_k_fold_split <- function( data, num_folds=10, method="cluster", colnames=NULL )
{
print(paste(num_folds,"-fold-split, data-size",nrow(data)))
- data.processed = as.matrix(process_data( data ))
+ data.processed = as.matrix(process_data( data, colnames ))
+ print(paste("split using #features: ",ncol(data.processed)))
if (method == "samplecube")
{
folds = rep(0, times=nrow(data))
@@ -133,7 +148,7 @@ stratified_k_fold_split <- function( data, num_folds=10, method="cluster" )
{
require("TunePareto")
cl = cluster(data.processed)
- res = generateCVRuns(cl,ntimes=1,nfold=3)
+ res = generateCVRuns(cl,ntimes=1,nfold=num_folds)
folds = rep(0, times=nrow(data))
for (i in 1:num_folds)
for(j in 1:length(res[[1]][[i]]))
@@ -144,6 +159,50 @@ stratified_k_fold_split <- function( data, num_folds=10, method="cluster" )
stop("unknown method")
}
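+# map each row to the index of its first occurrence, e.g. rows
+# (a,b,a,c) yield (1,2,1,4); this lets sammon run on unique rows only,
+# with the duplicates restored afterwards via add_duplicates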
+duplicate_indices <- function( data ) {
+ indices = 1:nrow(data)
+ z = data
+ duplicate_index = anyDuplicated(z)
+ while(duplicate_index) {
+ duplicate_to_index = anyDuplicated(z[1:duplicate_index,],fromLast=T)
+ #print(paste(duplicate_index,'is dupl to',duplicate_to_index))
+ indices[duplicate_index] <- duplicate_to_index
+ z[duplicate_index,] <- paste('123$§%',duplicate_index)
+ duplicate_index = anyDuplicated(z)
+ }
+ indices
+}
+
+add_duplicates <- function( data, dup_indices ) {
+ result = data[1,]
+ for(i in 2:length(dup_indices)) {
+ row = data[rownames(data)==dup_indices[i],]
+ if(length(row)==0)
+ stop(paste('index ',i,' dup-index ',dup_indices[i],'not found in data'))
+ result = rbind(result, row)
+ }
+ rownames(result)<-NULL
+ result
+}
+
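+# sammon() stops on zero distances, i.e. on duplicate rows: embed the
+# unique rows only, then copy the fitted coordinates back onto the
+# duplicates recorded by duplicate_indices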
+sammon_duplicates <- function( data, ... ) {
+ di <- duplicate_indices(data)
+ print(di)
+ u <- unique(data)
+ print(paste('unique data points',nrow(u),'of',nrow(data)))
+ if(nrow(u) <= 4) stop("number of unique datapoints <= 4")
+ points_unique <- sammon(dist(u), ...)$points
+ if (nrow(u)<nrow(data))
+ {
+ points <- add_duplicates(points_unique, di)
+ points
+ }
+ else
+ {
+ points_unique
+ }
+}
+
plot_pre_process <- function( data, method="pca" )
{
data.processed = process_data( data )
@@ -158,6 +217,11 @@ plot_pre_process <- function( data, method="pca" )
data.emb <- smacofSym(dist(data.processed, method = "euclidean"), ndim=2, verbose=T)
data.emb$conf
}
+ else if (method == "sammon")
+ {
+ require("MASS")
+ sammon_duplicates(data.processed, k=2)
+ }
else
stop("unknown method")
}
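
The duplicate handling added above exists because MASS::sammon() refuses zero distances, i.e. repeated rows: duplicate_indices records which unique row each duplicate maps to, the embedding runs on the unique rows only, and add_duplicates copies the fitted coordinates back. This is also the path plot_pre_process(data, method="sammon") takes. A minimal round-trip sketch, assuming MASS is installed (the toy data frame is hypothetical):

    library(MASS)
    source("lib/stratification.R")

    d <- data.frame(x=c(1,2,3,1,5,8), y=c(2,3,5,2,8,13))  # rows 1 and 4 identical
    duplicate_indices(d)     # -> 1 2 3 1 5 6: row 4 maps back to row 1
    emb <- sammon_duplicates(d, k=2)
    nrow(emb) == nrow(d)     # TRUE: duplicates share their original's coordinates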