summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChristoph Helma <helma@in-silico.ch>2019-10-28 15:56:23 +0100
committerChristoph Helma <helma@in-silico.ch>2019-10-28 15:56:23 +0100
commit5dbcec8d20b5f1fe3179877ea28cdb72d1e2ba38 (patch)
tree7de2379f76ad10db5164cf81fb663ea4d57fb973
parentebbd51ce117dc0f5912df6fa25fcce5cb7aaa5fe (diff)
discussion, conclusion, abstract
-rw-r--r--Makefile12
-rw-r--r--bibliography.bib35
-rw-r--r--figures/results.csv10
-rw-r--r--figures/roc.pngbin0 -> 76285 bytes
-rw-r--r--mutagenicity.md321
-rw-r--r--results.yaml9
-rwxr-xr-xscripts/confusion-matrix-summary.rb1
-rwxr-xr-xscripts/results2csv.rb9
-rwxr-xr-xscripts/roc.R8
9 files changed, 233 insertions, 172 deletions
diff --git a/Makefile b/Makefile
index d0fba04..59b135b 100644
--- a/Makefile
+++ b/Makefile
@@ -40,15 +40,23 @@ LAZAR_SUMMARIES = $(SUMMARIES_DIR)/lazar-all.json $(SUMMARIES_DIR)/lazar-high-co
CONFUSION_MATRICES = $(CONFUSION_MATRICES_DIR)/R-SVM.csv $(CONFUSION_MATRICES_DIR)/R-RF.csv $(CONFUSION_MATRICES_DIR)/R-DL.csv $(CONFUSION_MATRICES_DIR)/tensorflow-all.csv $(CONFUSION_MATRICES_DIR)/tensorflow-selected.csv $(CONFUSION_MATRICES_DIR)/lazar-all.csv $(CONFUSION_MATRICES_DIR)/lazar-high-confidence.csv $(CONFUSION_MATRICES_DIR)/lazar-padel-all.csv $(CONFUSION_MATRICES_DIR)/lazar-padel-high-confidence.csv
DATA = data/mutagenicity.sdf data/mutagenicity.csv data/mutagenicity-fingerprints.csv
+FIGURES = figures/roc.png
-all: $(DATA) $(TABLES) mutagenicity.pdf
-#all: $(SUMMARIES) $(DATA) $(TABLES) mutagenicity.pdf
+all: $(DATA) $(TABLES) $(FIGURES) mutagenicity.pdf
include $(PANDOC_SCHOLAR_PATH)/Makefile
export: $(DATA)
mutagenicity.mustache.md: results.yaml mutagenicity.md
mustache $^ > $@
+# figures
+
+figures/roc.png: figures/results.csv
+ scripts/roc.R
+
+figures/results.csv: results.yaml
+ scripts/results2csv.rb $< > $@
+
# tables
tables/r-summary.csv: $(R_SUMMARIES)
diff --git a/bibliography.bib b/bibliography.bib
index 7c660ea..a8f0d5e 100644
--- a/bibliography.bib
+++ b/bibliography.bib
@@ -1,3 +1,38 @@
+@article{Helma2018,
+ author = { Christoph Helma and David Vorgrimmler and Denis Gebele and Martin Gütlein and Barbara Engeli and Jürg Zarn and Benoit Schilter and Elena Lo Piparo},
+ title = "Modeling Chronic Toxicity: A comparison of experimental variability with {(Q)SAR}/read-across predictions",
+ year = "2018",
+ journal = {Frontiers in pharmacology},
+ number = 9,
+ pages = "413",
+}
+
+
+@article{Benigni1988,
+author = { R. Benigni and A. Giuliani },
+title = {Computer-assisted analysis of interlaboratory Ames test variability},
+journal = {Journal of Toxicology and Environmental Health},
+volume = {25},
+number = {1},
+pages = {135-148},
+year = {1988},
+publisher = {Taylor & Francis},
+doi = {10.1080/15287398809531194},
+ note ={PMID: 3418743},
+
+URL = {
+ https://doi.org/10.1080/15287398809531194
+
+},
+eprint = {
+ https://doi.org/10.1080/15287398809531194
+
+}
+
+}
+
+
+
@Article{Kazius2005,
author = "Kazius, J. and McGuire, R. and Bursi, R.",
year = 2005,
diff --git a/figures/results.csv b/figures/results.csv
new file mode 100644
index 0000000..f37c9b5
--- /dev/null
+++ b/figures/results.csv
@@ -0,0 +1,10 @@
+tpr,fpr
+R-SVM,0.56,0.33
+R-RF,0.56,0.29
+R-DL,0.88,0.76
+TF,0.63,0.37
+TF-FS,0.61,0.36
+L,0.85,0.22
+L-HC,0.89,0.21
+L-P,0.32,0.21
+L-P-HC,0.32,0.21
diff --git a/figures/roc.png b/figures/roc.png
new file mode 100644
index 0000000..fddafdf
--- /dev/null
+++ b/figures/roc.png
Binary files differ
diff --git a/mutagenicity.md b/mutagenicity.md
index 6c8b7be..69341d1 100644
--- a/mutagenicity.md
+++ b/mutagenicity.md
@@ -23,32 +23,33 @@ institute:
name: Berlin Institute for Medical Systems Biology, Max Delbrück Center for Molecular Medicine in the Helmholtz Association
address: "Robert-Rössle-Strasse 10, Berlin, 13125, Germany"
bibliography: bibliography.bib
-keywords: mutagenicity, (Q)SAR, lazar, random forest, support vector machine, deep learning
+keywords: mutagenicity, QSAR, lazar, random forest, support vector machine, deep learning
+
documentclass: scrartcl
tblPrefix: Table
+figPrefix: Figure
+header-includes:
+ - \usepackage{setspace}
+ - \doublespacing
+ - \usepackage{lineno}
+ - \linenumbers
...
Abstract
========
-k-nearest neighbor (`lazar`), random forest, support vector machine and deep
-learning algorithms were applied to a new *Salmonella* mutagenicity dataset
-with 8281 unique chemical structures. Algorithm performance was evaluated
-using 5-fold crossvalidation.
-TODO
-- results
-- conclusion
+Random forest, support vector machine, deep learning and k-nearest neighbor
+(`lazar`) algorithms were applied to a new *Salmonella* mutagenicity dataset
+with 8309 unique chemical structures. The best prediction accuracies in
+10-fold-crossvalidation were obtained with `lazar` models, that gave accuracies
+similar to the interlaboratory variability of the Ames test.
Introduction
============
-TODO: algo history
-
-TODO: dataset history
-
-TODO: open problems
+TODO
-The main objective of this study was
+The main objectives of this study were
- to generate a new training dataset, by combining the most comprehensive public mutagenicity datasets
- to compare the performance of global models (RF, SVM, Neural Nets) with local models (`lazar`)
@@ -59,14 +60,14 @@ Materials and Methods
Data
----
-For all methods, the same training dataset was used. The
+An identical training dataset was used for all models. The
training dataset was compiled from the following sources:
- Kazius/Bursi Dataset (4337 compounds, @Kazius2005): <http://cheminformatics.org/datasets/bursi/cas_4337.zip>
- Hansen Dataset (6513 compounds, @Hansen2009): <http://doc.ml.tu-berlin.de/toxbenchmark/Mutagenicity_N6512.csv>
-- EFSA Dataset (695 compounds): <https://data.europa.eu/euodp/data/storage/f/2017-0719T142131/GENOTOX%20data%20and%20dictionary.xls>
+- EFSA Dataset (695 compounds @EFSA2016): <https://data.europa.eu/euodp/data/storage/f/2017-0719T142131/GENOTOX%20data%20and%20dictionary.xls>
Mutagenicity classifications from Kazius and Hansen datasets were used
without further processing. To achieve consistency with these
@@ -78,14 +79,13 @@ Line Entry Specification*) strings of the compound structures.
Duplicated experimental data with the same outcome was merged into a
single value, because it is likely that it originated from the same
experiment. Contradictory results were kept as multiple measurements in
-the database. The combined training dataset contains 8281 unique
+the database. The combined training dataset contains 8309 unique
structures.
-Source code for all data download, extraction and merge operations is
-publicly available from the git repository
-<https://git.in-silico.ch/mutagenicity-paper> under a GPL3 License.
-
-TODO: check/fix git repo
+Source code for all data download, extraction and merge operations is publicly
+available from the git repository <https://git.in-silico.ch/mutagenicity-paper>
+under a GPL3 License. The new combined dataset can be found at
+<https://git.in-silico.ch/mutagenicity-paper/data/mutagenicity.csv>.
Algorithms
----------
@@ -116,12 +116,12 @@ sections.
#### Neighbour identification
-Similarity calculations were based on MolPrint2D fingerprints (@Bender2004) from the OpenBabel cheminformatics library
-(@OBoyle2011a). The MolPrint2D fingerprint uses
-atom environments as molecular representation, which resembles basically
-the chemical concept of functional groups. For each atom in a molecule,
-it represents the chemical environment using the atom types of connected
-atoms.
+Similarity calculations were based on MolPrint2D fingerprints (*MP2D*,
+@Bender2004) from the OpenBabel cheminformatics library (@OBoyle2011a). The
+MolPrint2D fingerprint uses atom environments as molecular representation,
+which resembles basically the chemical concept of functional groups. For each
+atom in a molecule, it represents the chemical environment using the atom types
+of connected atoms.
MolPrint2D fingerprints are generated dynamically from chemical
structures and do not rely on predefined lists of fragments (such as
@@ -134,7 +134,7 @@ From MolPrint2D fingerprints a feature vector with all atom environments
of a compound can be constructed that can be used to calculate chemical
similarities.
-The chemical similarity between two compounds a and b is expressed as
+The chemical similarity between two compounds $a$ and $b$ is expressed as
the proportion between atom environments common in both structures $A \cap B$
and the total number of atom environments $A \cup B$ (Jaccard/Tanimoto
index).
@@ -148,16 +148,16 @@ absence of closely related neighbours, we follow a tiered approach:
- First a similarity threshold of 0.5 is used to collect neighbours,
to create a local QSAR model and to make a prediction for the query
- compound.
+  compound. These are predictions with *high confidence*.
- If any of these steps fails, the procedure is repeated with a
similarity threshold of 0.2 and the prediction is flagged with a
warning that it might be out of the applicability domain of the
- training data.
+ training data (*low confidence*).
- Similarity thresholds of 0.5 and 0.2 are the default values chosen
- > by the software developers and remained unchanged during the
- > course of these experiments.
+ by the software developers and remained unchanged during the
+ course of these experiments.
Compounds with the same structure as the query structure are
automatically eliminated from neighbours to obtain unbiased predictions
@@ -187,8 +187,8 @@ structural diversity of the training data. If no similar compounds are
found in the training data no predictions will be generated. Warnings
are issued if the similarity threshold had to be lowered from 0.5 to 0.2
in order to enable predictions. Predictions without warnings can be
-considered as close to the applicability domain and predictions with
-warnings as more distant from the applicability domain. Quantitative
+considered as close to the applicability domain (*high confidence*) and predictions with
+warnings as more distant from the applicability domain (*low confidence*). Quantitative
applicability domain information can be obtained from the similarities
of individual neighbours.
@@ -209,28 +209,21 @@ of individual neighbours.
- Public web interface:
<https://lazar.in-silico.ch>
-### Random Forest, Support Vector Machines, and Deep Learning in R-project
+### R Random Forest, Support Vector Machines, and Deep Learning
-For the Random Forest (RF), Support Vector Machines (SVM), and Deep
-Learning (DL) models, molecular descriptors were calculated
-with the PaDEL-Descriptors program (<http://www.yapcwsoft.com> version 2.21, @Yap2011).
-
-TODO: @Verena PaDEL descriptor description
-
-TODO: sentence ??
-
-From these descriptors were
-chosen, which were actually used for the generation of the DL model.
+#### PaDEL descriptors
+For Random Forest (RF), Support Vector Machines (SVM), and Deep
+Learning (DL) models, molecular descriptors were calculated
+with the PaDEL-Descriptors program (<http://www.yapcwsoft.com> version 2.21, @Yap2011). The same descriptors were used for TensorFlow models.
-In comparison to `lazar`, three other models (Random Forest (RF), Support
-Vector Machines (SVM), and Deep Learning (DL)) were evaluated.
+TODO: **Verena** kannst Du bitte die PaDEL Deskriptoren etwas ausfuehrlicher beschreiben (welche Typen, Anzahl, Bedeutung etc)
For the generation of these models, molecular 1D and 2D descriptors of
the training dataset were calculated using PaDEL-Descriptors (<http://www.yapcwsoft.com> version
2.21, @Yap2011).
-As the training dataset contained over 8280 instances, it was decided to
+As the training dataset contained over 8309 instances, it was decided to
delete instances with missing values during data pre-processing.
Furthermore, substances with equivocal outcome were removed. The final
training dataset contained 8080 instances with known mutagenic
@@ -257,13 +250,19 @@ absolute shrinkage and selection operator*) regression was performed
using the '*glmnet*'-function (package '*glmnet*'). The reduced dataset
was used for the generation of the pre-trained models.
+#### Random Forest
+
For the RF model, the '*randomForest*'-function (package
'*randomForest*') was used. A forest with 1000 trees with maximal
terminal nodes of 200 was grown for the prediction.
+#### Support Vector Machines
+
The '*svm*'-function (package 'e1071') with a *radial basis function
kernel* was used for the SVM model.
+#### Deep Learning
+
The DL model was generated using the '*h2o.deeplearning*'-function
(package '*h2o*'). The DL contained four hidden layer with 70, 50, 50,
and 10 neurons, respectively. Other hyperparameter were set as follows:
@@ -273,6 +272,8 @@ Weights and biases were in a first step determined with an unsupervised
DL model. These values were then used for the actual, supervised DL
model.
+TODO: **Verena** kannst Du bitte ueberpruefen, ob das noch stimmt und ggf die Figure 1 anpassen
+
To validate these models, an internal cross-validation approach was
chosen. The training dataset was randomly split in training data, which
contained 95% of the data, and validation data, which contain 5% of the
@@ -280,20 +281,9 @@ data. A feature selection with LASSO on the training data was performed,
reducing the number of descriptors to approximately 100. This step was
repeated five times. Based on each of the five different training data,
the predictive models were trained and the performance tested with the
-validation data. This step was repeated 10 times. Furthermore, a
-y-randomisation using the RF model was performed. During
-y-randomisation, the outcome (y-variable) is randomly permuted. The
-theory is that after randomisation of the outcome, the model should not
-be able to correlate the outcome to the properties (descriptor values)
-of the substances. The performance of the model should therefore
-indicate a by change prediction with an accuracy of about 50%. If this
-is true, it can be concluded that correlation between actual outcome and
-properties of the substances is real and not by chance (@Rücker2007).
+validation data. This step was repeated 10 times.
-![](figures/image1.png){width="6.26875in" height="5.486111111111111in"}
-
-Figure 1: Flowchart of the generation and validation of the models
-generated in R-project
+![Flowchart of the generation and validation of the models generated in R-project](figures/image1.png){#fig:valid}
#### Applicability domain
@@ -305,18 +295,13 @@ to the training dataset. Therefore, PA dataset is within the AD of the
training dataset and the models can be used to predict the genotoxic
potential of the PA dataset.
-#### y-randomisation
-
-After y-randomisation of the outcome, the accuracy and CCR are around
-50%, indicating a chance in the distribution of the results. This shows,
-that the outcome is actually related to the predictors and not by
-chance.
-
-### Deep Learning in TensorFlow
+### TensorFlow Deep Learning
Alternatively, a DL model was established with Python-based TensorFlow
program (<https://www.tensorflow.org/>) using the high-level API Keras
-(<https://www.tensorflow.org/guide/keras>) to build the models.
+(<https://www.tensorflow.org/guide/keras>) to build the models.
+
+TensorFlow models used the same PaDEL descriptors as the R models.
Data pre-processing was done by rank transformation using the
'*QuantileTransformer*' procedure. A sequential model has been used.
@@ -328,9 +313,11 @@ a L^2^-penalty of 0.001 was used for the input layer. For training of
the model, the ADAM algorithm was used to minimise the cross-entropy
loss using the default parameters of Keras. Training was performed for
100 epochs with a batch size of 64. The model was implemented with
-Python 3.6 and Keras. For training of the model, a 6-fold
-cross-validation was used. Accuracy was estimated by ROC-AUC and
-confusion matrix.
+Python 3.6 and Keras. For training of the model, a 10-fold
+cross-validation was used.
+
+TODO: **Philipp** kannst Du bitte ueberpruefen ob die Beschreibung noch stimmt
+und ob der Ablauf von Verena (Figure 1) auch fuer Deine Modelle gilt
Validation
----------
@@ -338,6 +325,8 @@ Validation
Results
=======
+TODO: **Verena** und **Philipp**: koennt Ihr bitte gegenchecken, ob ich keine Zahlendreher in den Ergebnissen habe
+
R Models
--------
@@ -452,9 +441,11 @@ The results of all crossvalidation experiments are summarized in @tbl:summary.
|NPV|{{R-RF.npv}}|{{R-SVM.npv}}|{{R-DL.npv}}|{{tensorflow-all.npv}}|{{tensorflow-selected.npv}}|{{lazar-all.npv}}|{{lazar-high-confidence.npv}}|{{lazar-padel-all.npv}}|{{lazar-padel-high-confidence.npv}}|
|Nr. predictions|{{R-RF.n}}|{{R-SVM.n}}|{{R-DL.n}}|{{tensorflow-all.n}}|{{tensorflow-selected.n}}|{{lazar-all.n}}|{{lazar-high-confidence.n}}|{{lazar-padel-all.n}}|{{lazar-padel-high-confidence.n}}|
-: Summary of crossvalidation results. *R-RF*: R Random Forests, *R-SVM*: R Support Vector Machines, *R-DL*: R Deep Learning, *TF*: TensorFlow without feature selection, *TF-FS*: TensorFlow with feature selection, *L*: lazar, *L-HC*: lazar high confidence predictions, *L-P*: lazar with PaDEL descriptors, *L-P-HC*: lazar PaADEL high confidence predictions, *PPV*: Positive predictive value (Precision), *NPV*: Negative predictive value {#tbl:summary}
+: Summary of crossvalidation results. *R-RF*: R Random Forests, *R-SVM*: R Support Vector Machines, *R-DL*: R Deep Learning, *TF*: TensorFlow without feature selection, *TF-FS*: TensorFlow with feature selection, *L*: lazar, *L-HC*: lazar high confidence predictions, *L-P*: lazar with PaDEL descriptors, *L-P-HC*: lazar PaDEL high confidence predictions, *PPV*: Positive predictive value (Precision), *NPV*: Negative predictive value {#tbl:summary}
+
+@fig:roc shows the position of crossvalidation results in receiver operating characteristic (ROC) space.
-TODO ROC curve, also in discussion
+![ROC plot of crossvalidation results. *R-RF*: R Random Forests, *R-SVM*: R Support Vector Machines, *R-DL*: R Deep Learning, *TF*: TensorFlow without feature selection, *TF-FS*: TensorFlow with feature selection, *L*: lazar, *L-HC*: lazar high confidence predictions, *L-P*: lazar with PaDEL descriptors, *L-P-HC*: lazar PaDEL high confidence predictions (overlaps with L-P)](figures/roc.png){#fig:roc}
Discussion
==========
@@ -462,17 +453,45 @@ Discussion
Data
----
-This combined dataset is according to our knowledge the largest dataset for *Salmonella* mutagenicity. I can be downloaded from TODO
+A new training dataset for *Salmonella* mutagenicity was created from three
+different sources (@Kazius2005, @Hansen2009, @EFSA2016). It contains 8309
+unique chemical structures, which is according to our knowledge the largest
+public mutagenicity dataset presently available. The new training data can be
+downloaded from
+<https://git.in-silico.ch/mutagenicity-paper/data/mutagenicity.csv>.
Model performance
-----------------
-lazar best
-slightly less predictions (could be a good thing)
-
-There are two major differences between `lazar` and R/TensorFlow models:
-
-- `lazar` uses MolPrint2D fingerprints, while the other models use PaDEL descriptors
+@tbl:summary and @fig:roc show that the standard `lazar` algorithm (with MP2D
+fingerprints) gives the most accurate crossvalidation results. R Random Forests,
+Support Vector Machines and TensorFlow models have similar accuracies with
+balanced sensitivity (true positive rate) and specificity (true negative rate).
+`lazar` models with PaDEL descriptors have low sensitivity and R Deep Learning
+models have low specificity.
+
+The accuracy of `lazar` *in-silico* predictions is comparable to the
+interlaboratory variability of the Ames test (80-85% according to
+@Benigni1988), especially for predictions with high confidence
+({{lazar-high-confidence.acc_perc}}%). This is a clear indication that
+*in-silico* predictions can be as reliable as the bioassays, if the compounds
+are close to the applicability domain. This conclusion is also supported by our
+analysis of `lazar` lowest observed effect level predictions, which are also
+similar to the experimental variability (@Helma2018).
+
+The lowest number of predictions ({{lazar-padel-high-confidence.n}}) has been
+obtained from `lazar`/PaDEL high confidence predictions, the largest number of
+predictions comes from TensorFlow models ({{tensorflow-all.n}}). Standard
+`lazar` gives a slightly lower number of predictions ({{lazar-all.n}}) than R
+and TensorFlow models. This is not necessarily a disadvantage, because `lazar`
+abstains from predictions, if the query compound is very dissimilar from the
+compounds in the training set and thus avoids to make predictions for compounds
+that do not fall into its applicability domain.
+
+There are two major differences between `lazar` and R/TensorFlow models, which
+might explain the different prediction accuracies:
+
+- `lazar` uses MolPrint2D fingerprints, while all other models use PaDEL descriptors
- `lazar` creates local models for each query compound and the other models use a single global model for all predictions
We will discuss both options in the following sections.
@@ -482,104 +501,66 @@ Descriptors
This study uses two types of descriptors to characterize chemical structures.
-MolPrint2D fingerprints (MP2D, @Bender2004) use
-atom environments (i.e. connected atoms for all atoms in a molecule) as
-molecular representation, which resembles basically the chemical concept of
-functional groups. MP2D descriptors are used to determine chemical similarities
-in lazar, and previous experiments have shown, that they give more accurate results than predefined descriptors (e.g.
-MACCS, FP2-4) for all investigated endpoints.
+MolPrint2D fingerprints (MP2D, @Bender2004) use atom environments (i.e.
+connected atoms for all atoms in a molecule) as molecular representation, which
+resembles basically the chemical concept of functional groups. MP2D descriptors
+are used to determine chemical similarities in lazar, and previous experiments
+have shown, that they give more accurate results than predefined descriptors
+(e.g. MACCS, FP2-4) for all investigated endpoints.
PaDEL calculates topological and physical-chemical descriptors.
-TODO: @Verena Beschreibung
-
-PaDEL descriptors were used for the R and TensorFlow models. In addition we have used PaDEL descriptors to calculate cosine similarities for the `lazar` algorithm and compared the results with standard MP2D similarities, which led to a significant decrease of `lazar` prediction accuracies. Based on this result we can conclude, that PaDEL descriptors are less suited for similarity calculations than MP2D descriptors.
-
-In order to investigate, if MP2D fingerprints are also a better option for global models we have tried to build R and TensorFlow models both with and without unsupervised feature selection. Unfortunately none of the algorithms was capable to deal with the large and sparsely populated descriptor matrix. Based on this result we can conclude, that MP2D descriptors are at the moment unsuitable for standard global machine learning algorithms. Please note that `lazar` does not suffer from the sparseness problem, because (a) it utilizes internally a much more efficient occurrence based representation and (b) it uses fingerprints only for similarity calculations and mot as model parameters.
+TODO: **Verena** kannst Du bitte die Deskriptoren nochmals kurz beschreiben
+
+PaDEL descriptors were used for the R and TensorFlow models. In addition we
+have used PaDEL descriptors to calculate cosine similarities for the `lazar`
+algorithm and compared the results with standard MP2D similarities, which led
+to a significant decrease of `lazar` prediction accuracies. Based on this
+result we can conclude, that PaDEL descriptors are less suited for similarity
+calculations than MP2D descriptors.
+
+In order to investigate, if MP2D fingerprints are also a better option for
+global models we have tried to build R and TensorFlow models both with and
+without unsupervised feature selection. Unfortunately none of the algorithms
+was capable to deal with the large and sparsely populated descriptor matrix.
+Based on this result we can conclude, that MP2D descriptors are at the moment
+unsuitable for standard global machine learning algorithms. Please note that
+`lazar` does not suffer from the sparseness problem, because (a) it utilizes
+internally a much more efficient occurrence based representation and (b) it
+uses fingerprints only for similarity calculations and not as model parameters.
+
+Based on these results we can conclude, that PaDEL descriptors are less suited
+for similarity calculations than MP2D fingerprints and that current standard
+machine learning algorithms are not capable to utilize chemical fingerprints.
Algorithms
----------
-General model performance
-
-Based on the results of the cross-validation for all models, `lazar`, RF,
-SVM, DL (R-project) and DL (TensorFlow) it can be state that the
-prediction results are not optimal due to different reasons. The
-accuracy as measured during cross-validation of the four models (RF,
-SVM, DL (R-project and TensorFlow)) was partly low with CCR values
-between 59.3 and 68%, with the R-generated DL model and the
-TensorFlow-generated DL model showing the worst and the best
-performance, respectively. The validation of the R-generated DL model
-revealed a high sensitivity (89.2%) but an unacceptably low specificity
-of 29.9% indicating a high number of false positive estimates. The
-TensorFlow-generated DL model, however, showed an acceptable but not
-optimal accuracy of 68%, a sensitivity of 69.9% and a specificity of
-45.6%. The low specificity indicates that both DL models tends to
-predict too many instances as positive (genotoxic), and therefore have a
-high false positive rate. This allows at least with the TensorFlow
-generated DL model to make group statements, but the confidence for
-estimations of single PAs appears to be insufficiently low.
-
-Several factors have likely contributed to the low to moderate
-performance of the used methods as shown during the cross-validation:
-
-1. The outcome in the training dataset was based on the results of AMES
- tests for genotoxicity [ICH 2011](#_ENREF_63)(), an *in vitro* test
- in different strains of the bacteria *Salmonella typhimurium*. In
- this test, mutagenicity is evaluated with and without prior
- metabolic activation of the test substance. Metabolic activation
- could result in the formation of genotoxic metabolites from
- non-genotoxic parent compounds. However, no distinction was made in
- the training dataset between substances that needed metabolic
- activation before being mutagenic and those that were mutagenic
- without metabolic activation. `lazar` is able to handle this
- 'inaccuracy' in the training dataset well due to the way the
- algorithm works: `lazar` predicts the genotoxic potential based on the
- neighbours of substances with comparable structural features,
- considering mutagenic and not mutagenic neighbours. Based on the
- structural similarity, a probability for mutagenicity and no
- mutagenicity is calculated independently from each other (meaning
- that the sum of probabilities does not necessarily adds up to 100%).
- The class with the higher outcome is then the overall outcome for
- the substance.
-
-> In contrast, the other models need to be trained first to recognise
-> the structural features that are responsible for genotoxicity.
-> Therefore, the mixture of substances being mutagenic with and without
-> metabolic activation in the training dataset may have adversely
-> affected the ability to separate the dataset in two distinct classes
-> and thus explains the relatively low performance of these models.
-
-2. Machine learning algorithms try to find an optimized solution in a
- high-dimensional (one dimension per each predictor) space. Sometimes
- these methods do not find the global optimum of estimates but only
- local (not optimal) solutions. Strategies to find the global
- solutions are systematic variation (grid search) of the
- hyperparameters of the methods, which may be very time consuming in
- particular in large datasets.
-
+`lazar` is formally a *k-nearest-neighbor* algorithm that searches for similar
+structures for a given compound and calculates the prediction based on the
+experimental data for these structures. The QSAR literature calls such models
+frequently *local models*, because models are generated specifically for each
+query compound. R and TensorFlow models are in contrast *global models*, i.e. a
+single model is used to make predictions for all compounds. It has been
+postulated in the past, that local models are more accurate, because they can
+account better for mechanisms, that affect only a subset of the training data.
+Our results seem to support this assumption, because `lazar` models perform
+better than global models. Both types of models use however different
+descriptors, and for this reason we cannot draw a definitive conclusion if the
+model algorithm or the descriptor type are the reason for the observed
+differences. In order to answer this question, we would have to use global
+modelling algorithms that are capable to handle large, sparse binary matrices.
Conclusions
===========
-In this study, an attempt was made to predict the genotoxic potential of
-PAs using five different machine learning techniques (`lazar`, RF, SVM, DL
-(R-project and TensorFlow). The results of all models fitted only partly
-to the findings in literature, with best results obtained with the
-TensorFlow DL model. Therefore, modelling allows statements on the
-relative risks of genotoxicity of the different PA groups. Individual
-predictions for selective PAs appear, however, not reliable on the
-current basis of the used training dataset.
-
-This study emphasises the importance of critical assessment of
-predictions by QSAR models. This includes not only extensive literature
-research to assess the plausibility of the predictions, but also a good
-knowledge of the metabolism of the test substances and understanding for
-possible mechanisms of toxicity.
-
-In further studies, additional machine learning techniques or a modified
-(extended) training dataset should be used for an additional attempt to
-predict the genotoxic potential of PAs.
+A new public *Salmonella* mutagenicity training dataset with 8309 compounds was
+created and used to train `lazar`, R and TensorFlow models. The best
+performance was obtained with `lazar` models using MolPrint2D descriptors, with
+prediction accuracies comparable to the interlaboratory variability of the Ames
+test. Differences between algorithms (local vs. global models) and/or
+descriptors (MolPrint2D vs PaDEL) may be responsible for the different
+prediction accuracies.
References
==========
diff --git a/results.yaml b/results.yaml
index 489c1dd..b45d16c 100644
--- a/results.yaml
+++ b/results.yaml
@@ -7,6 +7,7 @@ R-SVM:
:n: 8070
:acc: 0.61
:tpr: 0.56
+ :fpr: 0.33
:tnr: 0.67
:ppv: 0.62
:npv: 0.61
@@ -23,6 +24,7 @@ R-RF:
:n: 8070
:acc: 0.64
:tpr: 0.56
+ :fpr: 0.29
:tnr: 0.71
:ppv: 0.66
:npv: 0.62
@@ -39,6 +41,7 @@ R-DL:
:n: 8070
:acc: 0.56
:tpr: 0.88
+ :fpr: 0.76
:tnr: 0.24
:ppv: 0.53
:npv: 0.67
@@ -55,6 +58,7 @@ tensorflow-all:
:n: 8080
:acc: 0.63
:tpr: 0.63
+ :fpr: 0.37
:tnr: 0.63
:ppv: 0.62
:npv: 0.63
@@ -71,6 +75,7 @@ tensorflow-selected:
:n: 8080
:acc: 0.63
:tpr: 0.61
+ :fpr: 0.36
:tnr: 0.64
:ppv: 0.63
:npv: 0.63
@@ -87,6 +92,7 @@ lazar-all:
:n: 7781
:acc: 0.82
:tpr: 0.85
+ :fpr: 0.22
:tnr: 0.78
:ppv: 0.8
:npv: 0.84
@@ -103,6 +109,7 @@ lazar-high-confidence:
:n: 5890
:acc: 0.84
:tpr: 0.89
+ :fpr: 0.21
:tnr: 0.79
:ppv: 0.83
:npv: 0.85
@@ -119,6 +126,7 @@ lazar-padel-all:
:n: 4089
:acc: 0.58
:tpr: 0.32
+ :fpr: 0.21
:tnr: 0.79
:ppv: 0.56
:npv: 0.59
@@ -135,6 +143,7 @@ lazar-padel-high-confidence:
:n: 4081
:acc: 0.58
:tpr: 0.32
+ :fpr: 0.21
:tnr: 0.79
:ppv: 0.56
:npv: 0.59
diff --git a/scripts/confusion-matrix-summary.rb b/scripts/confusion-matrix-summary.rb
index e0adf4e..129d69a 100755
--- a/scripts/confusion-matrix-summary.rb
+++ b/scripts/confusion-matrix-summary.rb
@@ -18,6 +18,7 @@ ARGV.each do |f|
:n => (tp+fp+tn+fn).to_i,
:acc => ((tp+tn)/(tp+fp+tn+fn)).round(2),
:tpr => (tp/(tp+fn)).round(2),
+ :fpr => (fp/(fp+tn)).round(2),
:tnr => (tn/(tn+fp)).round(2),
:ppv => (tp/(tp+fp)).round(2),
:npv => (tn/(tn+fn)).round(2),
diff --git a/scripts/results2csv.rb b/scripts/results2csv.rb
new file mode 100755
index 0000000..a519754
--- /dev/null
+++ b/scripts/results2csv.rb
@@ -0,0 +1,9 @@
+#!/usr/bin/env ruby
+require "yaml"
+
+data = YAML.load(File.read ARGV[0])
+puts "tpr,fpr"
+data.each do |algo,values|
+ algo = algo.sub("tensorflow","TF").sub("selected","FS").sub("lazar","L").sub("padel","P").sub("high-confidence","HC").sub("-all","")
+ puts [algo,values[:tpr],values[:fpr]].join(",")
+end
diff --git a/scripts/roc.R b/scripts/roc.R
new file mode 100755
index 0000000..cb219fc
--- /dev/null
+++ b/scripts/roc.R
@@ -0,0 +1,8 @@
+#!/usr/bin/env Rscript
+library(ggplot2)
+data <- read.csv("figures/results.csv",header=T)
+p <- ggplot(data, aes(x=fpr, y=tpr)) + geom_abline()
+p <- p + geom_label(label=rownames(data) )
+p <- p + expand_limits(x=c(0,1),y=c(0,1))
+p <- p + labs(x = "False positive rate", y = "True positive rate")
+ggsave("figures/roc.png")