@conference{230,
  keywords  = {Generalisation, fully-connected feedforward networks, activation distributions, MLP},
  author    = {Marelie Davel},
  title     = {Activation gap generators in neural networks},
  abstract  = {No framework exists that can explain and predict the generalisation ability of DNNs in general circumstances. In fact, this question has not been addressed for some of the least complicated neural network architectures: fully-connected feedforward networks with ReLU activations and a limited number of hidden layers. Building on recent work [2] that demonstrates the ability of individual nodes in a hidden layer to draw class-specific activation distributions apart, we show how a simplified network architecture can be analysed in terms of these activation distributions and, more specifically, the sample distances or activation gaps each node produces. We provide a theoretical perspective on the utility of viewing nodes as activation gap generators, and define the gap conditions that are guaranteed to result in perfect classification of a set of samples. We support these conclusions with empirical results.},
  year      = {2019},
  booktitle = {South African Forum for Artificial Intelligence Research (FAIR)},
  pages     = {64--76},
  month     = {December},
  note      = {4--6 December 2019},
  publisher = {CEUR Workshop Proceedings},
  address   = {Cape Town, South Africa},
}