_bibliography/references.bib
+11 −1 (11 additions, 1 deletion)
@@ -10,6 +10,7 @@ @inproceedings{
 url={https://openreview.net/forum?id=EMkrwJY2de},
 pdf={https://openreview.net/pdf?id=EMkrwJY2de},
 abstract={Message Passing Graph Neural Networks are known to suffer from two problems that are sometimes believed to be diametrically opposed: over-squashing and over-smoothing. The former results from topological bottlenecks that hamper the information flow from distant nodes and are mitigated by spectral gap maximization, primarily, by means of edge additions. However, such additions often promote over-smoothing that renders nodes of different classes less distinguishable. Inspired by the Braess phenomenon, we argue that deleting edges can address over-squashing and over-smoothing simultaneously. This insight explains how edge deletions can improve generalization, thus connecting spectral gap optimization to a seemingly disconnected objective of reducing computational resources by pruning graphs for lottery tickets. To this end, we propose a computationally effective spectral gap optimization framework to add or delete edges and demonstrate its effectiveness on the long range graph benchmark and on larger heterophilous datasets.},
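As context for the abstract above: a sketch of the objective it refers to, assuming (as is standard in the over-squashing literature, though not stated in the entry itself) that "spectral gap" means the algebraic connectivity of the graph Laplacian:

    % Assumed notation, not taken from the entry: G = (V, E) with
    % Laplacian L = D - A and eigenvalues 0 = lambda_1 <= lambda_2 <= ...
    % The spectral gap is lambda_2; greedy rewiring picks the edge whose
    % addition (or, per the Braess-style argument above, deletion)
    % maximizes it:
    \[
      e^{\star} = \arg\max_{e}\, \lambda_2\big(L(G \pm e)\big)
    \]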
@@ -23,7 +24,16 @@
 abstract={Graph neural networks exhibiting a rescale invariance, like GATs, obey a conservation law of their parameters, which has been exploited to derive a balanced state that induces good initial trainability. Yet, finite learning rates as used in practice topple the network out of balance during training. This effect is even more pronounced with larger learning rates, which tend to induce improved generalization but make the training dynamics less robust. To support even larger learning rates, we propose to dynamically balance the network according to a different criterion, based on relative gradients, that promotes faster and better convergence. In combination with large learning rates and gradient clipping, dynamic rebalancing significantly improves generalization on real-world data. We observe that rescaling provides us with the flexibility to control the order in which network layers are trained. This leads to novel insights into similar phenomena as grokking, which can further boost generalization performance.}
 }

-@article{Hossain2024,
+@inproceedings{
+hossain2024pruning,
+title={Pruning neural network models for gene regulatory dynamics using data and domain knowledge},
+author={Hossain, Intekhab and Fischer, Jonas and Burkholz, Rebekka and Quackenbush, John},
+booktitle={Thirty-eighth Conference on Neural Information Processing Systems},
+year={2024},
+abstract={The practical utility of machine learning models in the sciences often hinges on their interpretability. It is common to assess a model's merit for scientific discovery, and thus novel insights, by how well it aligns with already available domain knowledge - a dimension that is currently largely disregarded in the comparison of neural network models. While pruning can simplify deep neural network architectures and excels in identifying sparse models, as we show in the context of gene regulatory network inference, state-of-the-art techniques struggle with biologically meaningful structure learning. To address this issue, we propose DASH, a generalizable framework that guides network pruning by using domain-specific structural information in model fitting and leads to sparser, more interpretable models that are more robust to noise. Using both synthetic data with ground truth information, as well as real-world gene expression data, we show that DASH, using knowledge about gene interaction partners within the putative regulatory network, outperforms general pruning methods by a large margin and yields deeper insights into the biological systems being studied.},
+}
+
+
 @article{hossain2024biologically,
 author={Hossain, Intekhab and Fanfani, Viola and Fischer, Jonas and Quackenbush, John and Burkholz, Rebekka},
 title={Biologically informed NeuralODEs for genome-wide regulatory dynamics},
 abstract={Gene regulatory network (GRN) models that are formulated as ordinary differential equations (ODEs) can accurately explain temporal gene expression patterns and promise to yield new insights into important cellular processes, disease progression, and intervention design. Learning such gene regulatory ODEs is challenging, since we want to predict the evolution of gene expression in a way that accurately encodes the underlying GRN governing the dynamics and the nonlinear functional relationships between genes. Most widely used ODE estimation methods either impose too many parametric restrictions or are not guided by meaningful biological insights, both of which impede either scalability, explainability, or both.},
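A compact sketch of the model family the two entries above describe, under assumed notation that is not taken from the entries themselves: expression levels of g genes evolve under a learned ODE whose connectivity is constrained to a putative GRN, which is the structure DASH-style pruning targets:

    % x(t) in R^g: gene expression; W: learnable weights; M: binary mask
    % encoding the prior GRN (M_ij = 1 iff gene j putatively regulates gene i).
    \[
      \frac{\mathrm{d}x}{\mathrm{d}t} = f_{\theta}\big(x(t)\big)
    \]
    % with f_theta parameterized through the masked weights M \odot W;
    % pruning "for gene regulatory dynamics" then means driving the support
    % of W toward M rather than minimizing parameter count alone.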
<spanclass="na">author</span><spanclass="p">=</span><spanclass="s">{Hossain, Intekhab and Fanfani, Viola and Fischer, Jonas and Quackenbush, John and Burkholz, Rebekka}</span><spanclass="p">,</span>
224
272
<spanclass="na">title</span><spanclass="p">=</span><spanclass="s">{Biologically informed NeuralODEs for genome-wide regulatory dynamics}</span><spanclass="p">,</span>
0 commit comments
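One practical note on this diff: the entry key changes from Hossain2024 to hossain2024pruning, so anything still citing the old key will break. A minimal sketch of the downstream usage, assuming a plain BibTeX consumer of this file (the _bibliography/ path suggests jekyll-scholar, which resolves keys the same way):

    \documentclass{article}
    \begin{document}
    % Old: \cite{Hossain2024} -- would now be an undefined citation.
    DASH prunes regulatory NeuralODEs using domain
    knowledge~\cite{hossain2024pruning}, extending biologically informed
    NeuralODEs~\cite{hossain2024biologically}.
    \bibliographystyle{plain}
    \bibliography{references}
    \end{document}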