neural: Validate models and store training data

Elias Projahn 2021-11-23 16:26:04 +01:00
parent 7c87788cad
commit c0e2dfced2


@@ -1,16 +1,33 @@
 # Find genes by training and applying a neural network.
-neural <- function(preset, progress = NULL, seed = 49641) {
+#
+# @param seed The seed will be used to make the results reproducible.
+# @param n_models This number specifies how many sets of training data should
+#   be created. For each set, there will be a model trained on the remaining
+#   training data and validated using this set. For non-training genes, the
+#   final score will be the mean of the results of applying the different
+#   models.
+neural <- function(preset, progress = NULL, seed = 49641, n_models = 5) {
     species_ids <- preset$species_ids
     gene_ids <- preset$gene_ids
     reference_gene_ids <- preset$reference_gene_ids
 
-    cached("neural", c(species_ids, gene_ids, reference_gene_ids), {
+    cached(
+        "neural",
+        c(species_ids, gene_ids, reference_gene_ids, seed, n_models),
+        { # nolint
+        reference_count <- length(reference_gene_ids)
+
+        if (!n_models %in% 2:reference_count) {
+            stop(paste0(
+                "n_models has to be between 2 and the number of reference ",
+                "genes."
+            ))
+        }
+
         # Make results reproducible.
         tensorflow::set_random_seed(seed)
 
-        gene_count <- length(gene_ids)
-        progress_buffer <- 0
-        progress_step <- 1 / (2 * length(reference_gene_ids) + 1)
+        # Step 1: Prepare input data.
+        # ---------------------------
 
         # Prefilter distances by species.
         distances <- geposan::distances[species %chin% species_ids]
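Editorial note on the new n_models guard above: a minimal standalone sketch of the bounds check, using made-up numbers rather than anything from the package.

# Editorial sketch (toy values, not part of the commit): the guard accepts any
# n_models between 2 and the number of reference genes.
reference_count <- 10
for (n_models in c(1, 5, 11)) {
    cat(n_models, "->", n_models %in% 2:reference_count, "\n")
}
#> 1 -> FALSE
#> 5 -> TRUE
#> 11 -> FALSE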
@@ -26,7 +43,10 @@ neural <- function(preset, progress = NULL, seed = 49641) {
         # Make columns containing positions and distances for each
         # species.
         for (species_id in species_ids) {
-            species_data <- distances[species == species_id, .(gene, distance)]
+            species_data <- distances[
+                species == species_id,
+                .(gene, distance)
+            ]
 
             # Only include species with at least 25% known values. As
             # positions and distances always coexist, we don't lose any
@@ -34,7 +54,7 @@ neural <- function(preset, progress = NULL, seed = 49641) {
             species_data <- stats::na.omit(species_data)
 
-            if (nrow(species_data) >= 0.25 * gene_count) {
+            if (nrow(species_data) >= 0.25 * length(gene_ids)) {
                 data <- merge(data, species_data, all.x = TRUE)
 
                 # Replace missing data with mean values. The neural network
@@ -55,7 +75,14 @@ neural <- function(preset, progress = NULL, seed = 49641) {
             }
         }
 
-        # Extract the reference genes.
+        if (!is.null(progress)) {
+            progress(0.1)
+        }
+
+        # Step 2: Prepare training data.
+        # ------------------------------
+
+        # Take out the reference data.
         reference_data <- data[gene %chin% reference_gene_ids]
         reference_data[, score := 1.0]
@@ -68,18 +95,46 @@ neural <- function(preset, progress = NULL, seed = 49641) {
         without_reference_data <- data[!gene %chin% reference_gene_ids]
 
-        reference_samples <- without_reference_data[
+        control_data <- without_reference_data[
             sample(
                 nrow(without_reference_data),
-                nrow(reference_data)
+                reference_count
             )
         ]
 
-        reference_samples[, score := 0.0]
+        control_data[, score := 0.0]
 
-        # Merge training data. The training data includes all reference
-        # genes as well as an equal number of random sample genes.
-        training_data <- rbindlist(list(reference_data, reference_samples))
+        # Split the training data into random sets to have validation data
+        # for each model.
+
+        # Scramble the source tables.
+        reference_data <- reference_data[sample(reference_count)]
+        control_data <- control_data[sample(reference_count)]
+
+        networks <- list()
+        indices <- seq_len(reference_count)
+        indices_split <- split(indices, indices %% n_models)
+
+        for (i in seq_len(n_models)) {
+            training_data <- rbindlist(list(
+                reference_data[!indices_split[[i]]],
+                control_data[!indices_split[[i]]]
+            ))
+
+            validation_data <- rbindlist(list(
+                reference_data[indices_split[[i]]],
+                control_data[indices_split[[i]]]
+            ))
+
+            networks[[i]] <- list(
+                training_data = training_data,
+                validation_data = validation_data
+            )
+        }
+
+        # Step 3: Create, train and apply neural network.
+        # -----------------------------------------------
 
         # Layers for the neural network.
         input_layer <- length(input_vars)
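To make the fold construction above concrete, here is an editorial sketch (toy values, not part of the commit) of what split(indices, indices %% n_models) produces; data.table's negative and positive row indexing then selects the out-of-fold training rows and the in-fold validation rows.

# Editorial sketch, not part of the commit: modulo-based split into folds.
reference_count <- 10
n_models <- 3
indices <- seq_len(reference_count)
split(indices, indices %% n_models)
#> $`0`
#> [1] 3 6 9
#> $`1`
#> [1]  1  4  7 10
#> $`2`
#> [1] 2 5 8
# In the loop, reference_data[!indices_split[[i]]] keeps the rows outside fold i
# for training, while reference_data[indices_split[[i]]] forms the validation set.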
@@ -87,11 +142,19 @@ neural <- function(preset, progress = NULL, seed = 49641) {
         layer2 <- 0.5 * input_layer
         layer3 <- 0.5 * layer2
 
-        # Train the model using the specified subset of the training data and
-        # apply it for predicting the genes.
-        apply_network <- function(training_gene_ids, gene_ids) {
-            # Create a new model for each training session, because the model
-            # would keep its state across training sessions otherwise.
+        # Convert data to matrix and normalize it.
+        to_matrix <- function(data) {
+            data_matrix <- as.matrix(data[, ..input_vars])
+            colnames(data_matrix) <- NULL
+            keras::normalize(data_matrix)
+        }
+
+        data_matrix <- to_matrix(data)
+        output_vars <- NULL
+
+        for (i in seq_along(networks)) {
+            # Create a new model for each training session, because the
+            # model would keep its state across training sessions otherwise.
             model <- keras::keras_model_sequential() |>
                 keras::layer_dense(
                     units = layer1,
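One note on the new to_matrix() helper: if I read keras::normalize's defaults correctly (L2 norm along the last axis), each gene's feature row is scaled to unit length. A small editorial sketch with a made-up row:

# Editorial sketch, not part of the commit.
m <- matrix(c(3, 4), nrow = 1)
m / sqrt(sum(m^2))   # 0.6 0.8, the same result keras::normalize(m) should give
                     # with its assumed defaults (axis = -1, order = 2).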
@@ -114,69 +177,70 @@ neural <- function(preset, progress = NULL, seed = 49641) {
                 ) |>
                 keras::compile(loss = "binary_crossentropy")
 
-            # Prepare training data by filtering it to the given genes and
-            # converting it to a matrix.
-            training_data <- training_data[gene %chin% training_gene_ids]
-            training_matrix <- as.matrix(training_data[, ..input_vars])
-            colnames(training_matrix) <- NULL
-            training_matrix <- keras::normalize(training_matrix)
+            # Train the model.
+            network <- networks[[i]]
+
+            training_data <- network$training_data
+            training_matrix <- to_matrix(training_data)
+            validation_data <- network$validation_data
+            validation_matrix <- to_matrix(validation_data)
 
             fit <- keras::fit(
                 model,
                 x = training_matrix,
                 y = training_data$score,
+                validation_data = list(
+                    x_val = validation_matrix,
+                    y_val = validation_data$score
+                ),
                 epochs = 300,
                 verbose = FALSE
             )
 
-            # Convert the input data to a matrix.
-            data_matrix <- as.matrix(data[gene %chin% gene_ids, ..input_vars])
-            colnames(data_matrix) <- NULL
-            data_matrix <- keras::normalize(data_matrix)
-
-            data[
-                gene %chin% gene_ids,
-                score := stats::predict(model, data_matrix)
-            ]
+            # Apply the model.
+            data[, new_score := stats::predict(model, data_matrix)]
+
+            # Remove the values of the training data itself.
+            data[gene %chin% training_data$gene, new_score := NA]
+
+            output_var <- sprintf("score%i", i)
+            setnames(data, "new_score", output_var)
+            output_vars <- c(output_vars, output_var)
+
+            # Store the details.
+            networks[[i]]$model <- model
+            networks[[i]]$fit <- fit
 
             if (!is.null(progress)) {
-                progress_buffer <<- progress_buffer + progress_step
-                progress(progress_buffer)
+                progress(0.1 + i * (0.9 / n_models))
             }
-
-            list(
-                training_gene_ids = training_gene_ids,
-                gene_ids = gene_ids,
-                model = model,
-                fit = fit
-            )
         }
 
-        # Apply the network to all non-training genes first.
-        network <- apply_network(
-            training_data$gene,
-            gene_ids[!gene_ids %chin% training_data$gene]
-        )
-
-        cross_networks <- NULL
-
-        # Apply the network to the training genes leaving out the gene itself.
-        for (training_gene_id in training_data$gene) {
-            cross_network <- apply_network(
-                training_data[gene != training_gene_id, gene],
-                training_gene_id
-            )
-
-            cross_networks <- c(cross_networks, cross_network)
-        }
+        # Compute the final score as the mean score.
+        data[,
+            score := mean(as.numeric(.SD), na.rm = TRUE),
+            .SDcols = output_vars,
+            by = gene
+        ]
+
+        if (!is.null(progress)) {
+            progress(1.0)
+        }
 
         structure(
             list(
                 results = data[, .(gene, score)],
-                network = network,
-                cross_networks = cross_networks
+                seed = seed,
+                n_models = n_models,
+                all_results = data[, !..input_vars],
+                networks = networks
             ),
             class = "geposan_method_results"
         )
-    })
+        }
+    )
 }
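Finally, the per-gene aggregation at the end of the loop can be checked in isolation. An editorial sketch with made-up out-of-fold score columns, mirroring the mean(as.numeric(.SD), na.rm = TRUE) call above; the NAs stand for the folds in which a gene was part of the training data:

# Editorial sketch, not part of the commit.
library(data.table)
dt <- data.table(
    gene = c("g1", "g2"),
    score1 = c(0.8, NA),
    score2 = c(NA, 0.4),
    score3 = c(0.6, 0.5)
)
dt[,
    score := mean(as.numeric(.SD), na.rm = TRUE),
    .SDcols = c("score1", "score2", "score3"),
    by = gene
]
dt[, .(gene, score)]
#>    gene score
#> 1:   g1  0.70
#> 2:   g2  0.45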