From 7228014188ab8033ee35e372d0e6b1969158d8cf Mon Sep 17 00:00:00 2001
From: Joel Dapello
Date: Tue, 2 Mar 2021 12:14:28 -0500
Subject: [PATCH 1/2] updated CM with a number of pytorch models and
 adversarially trained models

---
 candidate_models/base_models/__init__.py |  93 ++++++++++--
 .../model_commitments/model_layer_def.py | 134 ++++++++++--------
 2 files changed, 161 insertions(+), 66 deletions(-)

diff --git a/candidate_models/base_models/__init__.py b/candidate_models/base_models/__init__.py
index 1ba40bc..c7213e7 100644
--- a/candidate_models/base_models/__init__.py
+++ b/candidate_models/base_models/__init__.py
@@ -273,10 +273,46 @@ def vonecornet(model_name='cornets'):
         wrapper.image_size = 224
     return wrapper
 
+def GoN_model(function, train, image_size):
+    from urllib import request
+    import torch
+    from model_tools.activations.pytorch import load_preprocess_images
+    module = import_module(f'torchvision.models')
+    model_ctr = getattr(module, function)
+    model = model_ctr()
+    preprocessing = functools.partial(load_preprocess_images, image_size=image_size)
+    # load weights
+    framework_home = os.path.expanduser(os.getenv('CM_HOME', '~/.candidate_models'))
+    weightsdir_path = os.getenv('CM_TSLIM_WEIGHTS_DIR',
+                                os.path.join(framework_home, 'model-weights', 'resnet-50-robust'))
+    weights_id = 'resnet-50-' + train
+    weights_path = os.path.join(weightsdir_path, weights_id)
+    if not os.path.isfile(weights_path):
+        weight_urls = {
+            'resnet-50-GNTsig0.5': 'https://github.com/bethgelab/game-of-noise/releases/download/v1.0/Gauss_sigma_0.5_Model.pth',
+            'resnet-50-ANT3x3_SIN': 'https://github.com/bethgelab/game-of-noise/releases/download/v1.0/ANT3x3_SIN_Model.pth',
+        }
+        assert weights_id in weight_urls
+        url = weight_urls[weights_id]
+        _logger.debug(f"Downloading weights for resnet-50-GoN from {url} to {weights_path}")
+        os.makedirs(weightsdir_path, exist_ok=True)
+        request.urlretrieve(url, weights_path)
+
+    if torch.cuda.is_available():
+        checkpoint = torch.load(weights_path)
+    else:
+        checkpoint = torch.load(weights_path, map_location=torch.device('cpu'))
 
-def robust_model(function, image_size):
+    # load the trained weights from the checkpoint
+    model.load_state_dict(checkpoint['model_state_dict'])
+    # wrap model with pytorch wrapper
+    wrapper = PytorchWrapper(identifier=weights_id, model=model, preprocessing=preprocessing)
+    wrapper.image_size = image_size
+    return wrapper
+
+def robust_model(function, penalty, eps, image_size):
     from urllib import request
-    import torch 
+    import torch
     from model_tools.activations.pytorch import load_preprocess_images
     module = import_module(f'torchvision.models')
     model_ctr = getattr(module, function)
@@ -286,20 +322,32 @@ def robust_model(function, image_size):
     framework_home = os.path.expanduser(os.getenv('CM_HOME', '~/.candidate_models'))
     weightsdir_path = os.getenv('CM_TSLIM_WEIGHTS_DIR',
                                 os.path.join(framework_home, 'model-weights', 'resnet-50-robust'))
-    weights_path = os.path.join(weightsdir_path, 'resnet-50-robust')
+    weights_id = 'resnet-50-robust-' + penalty + '-' + eps
+    weights_path = os.path.join(weightsdir_path, weights_id)
     if not os.path.isfile(weights_path):
-        url = 'http://andrewilyas.com/ImageNet.pt'
+        weight_urls = {
+            'resnet-50-robust-l2-3': 'https://www.dropbox.com/s/knf4uimlqsi1yz8/imagenet_l2_3_0.pt?dl=1',
+            'resnet-50-robust-linf-4': 'https://www.dropbox.com/s/axfuary2w1cnyrg/imagenet_linf_4.pt?dl=1',
+            'resnet-50-robust-linf-8': 'https://www.dropbox.com/s/yxn15a9zklz3s8q/imagenet_linf_8.pt?dl=1',
+        }
+        assert weights_id in weight_urls
+        url = weight_urls[weights_id]
         _logger.debug(f"Downloading weights for resnet-50-robust from {url} to {weights_path}")
         os.makedirs(weightsdir_path, exist_ok=True)
         request.urlretrieve(url, weights_path)
-    checkpoint = torch.load(weights_path, map_location=torch.device('cpu'))
+
+    if torch.cuda.is_available():
+        checkpoint = torch.load(weights_path)
+    else:
+        checkpoint = torch.load(weights_path, map_location=torch.device('cpu'))
+
     # process weights -- remove the attacker and preprocessing weights
     weights = checkpoint['model']
     weights = {k[len('module.model.'):]: v for k, v in weights.items() if 'attacker' not in k}
     weights = {k: weights[k] for k in list(weights.keys())[2:]}
     model.load_state_dict(weights)
     # wrap model with pytorch wrapper
-    wrapper = PytorchWrapper(identifier=function+'-robust', model=model, preprocessing=preprocessing)
+    wrapper = PytorchWrapper(identifier=weights_id, model=model, preprocessing=preprocessing)
     wrapper.image_size = image_size
     return wrapper
 
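For reference, the key remapping above exists because the robustness-library checkpoints store the classifier together with an input-normalization module and an "attacker" copy of the network. A standalone sketch of the same remapping, handy for inspecting a downloaded checkpoint by hand (the file path is a placeholder, and the key layout is inferred from the code above rather than verified against every checkpoint release):

    import torch
    import torchvision.models as models

    # placeholder path: wherever one of the robustness checkpoints was saved
    checkpoint = torch.load('resnet-50-robust-l2-3', map_location=torch.device('cpu'))
    state = checkpoint['model']

    # drop the adversarial 'attacker' copy and strip the 'module.model.' prefix,
    # then skip the first two remaining entries (the wrapper's normalization buffers)
    state = {k[len('module.model.'):]: v for k, v in state.items() if 'attacker' not in k}
    state = {k: state[k] for k in list(state.keys())[2:]}

    # after remapping, the keys should line up with a plain torchvision resnet50
    reference = set(models.resnet50().state_dict())
    print('missing:', reference - set(state))      # ideally empty
    print('unexpected:', set(state) - reference)   # ideally empty
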
@@ -382,12 +430,37 @@ def __init__(self):
 
 
 _key_functions = {
     'alexnet': lambda: torchvision_model('alexnet', image_size=224),
+    'vgg-11-pt': lambda: torchvision_model('vgg11', image_size=224),
+    'vgg-11-bn-pt': lambda: torchvision_model('vgg11_bn', image_size=224),
+    'vgg-13-pt': lambda: torchvision_model('vgg13', image_size=224),
+    'vgg-13-bn-pt': lambda: torchvision_model('vgg13_bn', image_size=224),
+    'vgg-16-pt': lambda: torchvision_model('vgg16', image_size=224),
+    'vgg-16-bn-pt': lambda: torchvision_model('vgg16_bn', image_size=224),
+    'vgg-19-pt': lambda: torchvision_model('vgg19', image_size=224),
+    'vgg-19-bn-pt': lambda: torchvision_model('vgg19_bn', image_size=224),
     'squeezenet1_0': lambda: torchvision_model('squeezenet1_0', image_size=224),
     'squeezenet1_1': lambda: torchvision_model('squeezenet1_1', image_size=224),
-    'resnet-18': lambda: torchvision_model('resnet18', image_size=224),
-    'resnet-34': lambda: torchvision_model('resnet34', image_size=224),
-    'resnet-50-pytorch': lambda: torchvision_model('resnet50', image_size=224),
-    'resnet-50-robust': lambda: robust_model('resnet50', image_size=224),
+    'resnet-18-pt': lambda: torchvision_model('resnet18', image_size=224),
+    'resnet-34-pt': lambda: torchvision_model('resnet34', image_size=224),
+    'resnet-50-pt': lambda: torchvision_model('resnet50', image_size=224),
+    'resnet-101-pt': lambda: torchvision_model('resnet101', image_size=224),
+    'resnet-152-pt': lambda: torchvision_model('resnet152', image_size=224),
+    'densenet-121-pt': lambda: torchvision_model('densenet121', image_size=224),
+    'densenet-169-pt': lambda: torchvision_model('densenet169', image_size=224),
+    'densenet-201-pt': lambda: torchvision_model('densenet201', image_size=224),
+    'densenet-161-pt': lambda: torchvision_model('densenet161', image_size=224),
+    'resnext-50-32x4d-pt': lambda: torchvision_model('resnext50_32x4d', image_size=224),
+    'resnext-101-32x8d-pt': lambda: torchvision_model('resnext101_32x8d', image_size=224),
+    'wide-resnet-50-pt': lambda: torchvision_model('wide_resnet50_2', image_size=224),
+    'wide-resnet-101-pt': lambda: torchvision_model('wide_resnet101_2', image_size=224),
+
+    'resnet-50-robust-l2-3': lambda: robust_model('resnet50', penalty='l2', eps='3', image_size=224),
+    'resnet-50-robust-linf-4': lambda: robust_model('resnet50', penalty='linf', eps='4', image_size=224),
+    'resnet-50-robust-linf-8': lambda: robust_model('resnet50', penalty='linf', eps='8', image_size=224),
+
+    'resnet-50-GNTsig0.5': lambda: GoN_model('resnet50', train='GNTsig0.5', image_size=224),
+    'resnet-50-ANT3x3_SIN': lambda: GoN_model('resnet50', train='ANT3x3_SIN', image_size=224),
+
     'voneresnet-50': lambda: voneresnet(model_name='resnet50'),
     'voneresnet-50-robust': lambda: voneresnet(model_name='resnet50_at'),
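For reference, each entry above is a thunk: the model is only constructed, and its weights only downloaded, when the entry is invoked. A minimal sketch of exercising one of the new entries, assuming it runs from inside this module (or with `_key_functions` imported from `candidate_models.base_models`):

    # builds the l2, eps=3 adversarially trained ResNet-50 registered above; weights
    # are fetched into ~/.candidate_models/model-weights/ on first use (unless
    # CM_HOME / CM_TSLIM_WEIGHTS_DIR point elsewhere)
    wrapper = _key_functions['resnet-50-robust-l2-3']()
    print(wrapper.identifier, wrapper.image_size)  # resnet-50-robust-l2-3 224
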
diff --git a/candidate_models/model_commitments/model_layer_def.py b/candidate_models/model_commitments/model_layer_def.py
index cb664f7..04f13ab 100644
--- a/candidate_models/model_commitments/model_layer_def.py
+++ b/candidate_models/model_commitments/model_layer_def.py
@@ -2,6 +2,13 @@
 from brainscore.submission.ml_pool import ModelLayers
 
+def resnet_pt_layers(units):
+    return (['relu', 'maxpool'] +
+            [f"layer1.{i}" for i in range(units[0])] +
+            [f"layer2.{i}" for i in range(units[1])] +
+            [f"layer3.{i}" for i in range(units[2])] +
+            [f"layer4.{i}" for i in range(units[3])] +
+            ["avgpool"])
 
 def resnet50_layers(bottleneck_version):
     return resnet_layers(bottleneck_version=bottleneck_version, units=[3, 4, 6, 3])
 
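For reference, the helper enumerates the stem activations ('relu', 'maxpool'), every residual block in the four stages, and the global average pool, using torchvision's submodule names. For example, `resnet_pt_layers([2, 2, 2, 2])`, as used for `resnet-18-pt` below, evaluates to:

    ['relu', 'maxpool',
     'layer1.0', 'layer1.1',
     'layer2.0', 'layer2.1',
     'layer3.0', 'layer3.1',
     'layer4.0', 'layer4.1',
     'avgpool']
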
@@ -72,6 +79,14 @@ def prednet():
             'classifier.2', 'classifier.5'],  # fc-[relu]{6,7,8}
     'vgg-16': [f'block{i + 1}_pool' for i in range(5)] + ['fc1', 'fc2'],
     'vgg-19': [f'block{i + 1}_pool' for i in range(5)] + ['fc1', 'fc2'],
+    'vgg-11-pt': [f'features.{i}' for i in [1,2,4,5,7,9,10,12,14,15,17,19,20]] + ['classifier.1', 'classifier.4'],
+    'vgg-11-bn-pt': [f'features.{i}' for i in [2,3,6,7,10,13,14,17,20,21,24,27,28]] + ['classifier.1', 'classifier.4'],
+    'vgg-13-pt': [f'features.{i}' for i in [1,3,4,6,8,9,11,13,14,16,18,19,21,23,24]] + ['classifier.1', 'classifier.4'],
+    'vgg-13-bn-pt': [f'features.{i}' for i in [2,5,6,9,12,13,16,19,20,23,26,27,30,33,34]] + ['classifier.1', 'classifier.4'],
+    'vgg-16-pt': [f'features.{i}' for i in [1,3,4,6,8,9,11,13,15,16,18,20,21,23,25,27,29,30]] + ['classifier.1', 'classifier.4'],
+    'vgg-16-bn-pt': [f'features.{i}' for i in [2,5,6,9,12,13,16,19,22,23,26,29,32,33,36,39,42,43]] + ['classifier.1', 'classifier.4'],
+    'vgg-19-pt': [f'features.{i}' for i in [1,3,4,6,8,9,11,13,15,17,18,20,22,24,26,27,29,31,33,35,36]] + ['classifier.1', 'classifier.4'],
+    'vgg-19-bn-pt': [f'features.{i}' for i in [2,5,6,9,12,13,16,19,22,25,26,29,32,35,38,39,42,45,48,51,52]] + ['classifier.1', 'classifier.4'],
     'squeezenet1_0':
         ['features.' + layer for layer in
          # max pool + fire outputs (ignoring pools)
@@ -116,61 +131,75 @@ def prednet():
         [f'block13_sepconv{i + 1}_act' for i in range(2)] +
         [f'block14_sepconv{i + 1}_act' for i in range(2)] +
         ['avg_pool'],
-    'resnet-18':
-        ['conv1'] +
-        ['layer1.0.relu', 'layer1.1.relu'] +
-        ['layer2.0.relu', 'layer2.0.downsample.0', 'layer2.1.relu'] +
-        ['layer3.0.relu', 'layer3.0.downsample.0', 'layer3.1.relu'] +
-        ['layer4.0.relu', 'layer4.0.downsample.0', 'layer4.1.relu'] +
-        ['avgpool'],
-    'resnet-34':
-        ['conv1'] +
-        ['layer1.0.conv2', 'layer1.1.conv2', 'layer1.2.conv2'] +
-        ['layer2.0.downsample.0', 'layer2.1.conv2', 'layer2.2.conv2', 'layer2.3.conv2'] +
-        ['layer3.0.downsample.0', 'layer3.1.conv2', 'layer3.2.conv2', 'layer3.3.conv2',
-         'layer3.4.conv2', 'layer3.5.conv2'] +
-        ['layer4.0.downsample.0', 'layer4.1.conv2', 'layer4.2.conv2'] +
-        ['avgpool'],
-    'resnet-50':
-        ['conv1'] +
-        ['layer1.0.conv3', 'layer1.1.conv3', 'layer1.2.conv3'] +
-        ['layer2.0.downsample.0', 'layer2.1.conv3', 'layer2.2.conv3', 'layer2.3.conv3'] +
-        ['layer3.0.downsample.0', 'layer3.1.conv3', 'layer3.2.conv3', 'layer3.3.conv3',
-         'layer3.4.conv3', 'layer3.5.conv3'] +
-        ['layer4.0.downsample.0', 'layer4.1.conv3', 'layer4.2.conv3'] +
-        ['avgpool'],
+    'resnet-18-pt': resnet_pt_layers([2,2,2,2]),
+    'resnet-34-pt': resnet_pt_layers([3,4,6,3]),
+    'resnet-50-pt': resnet_pt_layers([3,4,6,3]),
+    'wide-resnet-50-pt': resnet_pt_layers([3,4,6,3]),
+    'resnet-101-pt': resnet_pt_layers([3,4,23,3]),
+    'wide-resnet-101-pt': resnet_pt_layers([3,4,23,3]),
+    'resnet-152-pt': resnet_pt_layers([3,8,36,3]),
+    'resnext-50-32x4d-pt': resnet_pt_layers([3,4,6,3]),
+    'resnext-101-32x8d-pt': resnet_pt_layers([3,4,23,3]),
+    'resnet-50-robust-l2-3': resnet_pt_layers([3,4,6,3]),
+    'resnet-50-robust-linf-4': resnet_pt_layers([3,4,6,3]),
+    'resnet-50-robust-linf-8': resnet_pt_layers([3,4,6,3]),
+    'resnet-50-GNTsig0.5': resnet_pt_layers([3,4,6,3]),
+    'resnet-50-ANT3x3_SIN': resnet_pt_layers([3,4,6,3]),
+    'resnet-50-SIN': resnet_pt_layers([3,4,6,3]),
+    'resnet-50-SIN_IN': resnet_pt_layers([3,4,6,3]),
+    'resnet-50-SIN_IN_IN': resnet_pt_layers([3,4,6,3]),
     'voneresnet-50':
         ['vone_block'] +
-        ['model.layer1.0.conv3', 'model.layer1.1.conv3', 'model.layer1.2.conv3'] +
-        ['model.layer2.0.downsample.0', 'model.layer2.1.conv3', 'model.layer2.2.conv3', 'model.layer2.3.conv3'] +
-        ['model.layer3.0.downsample.0', 'model.layer3.1.conv3', 'model.layer3.2.conv3', 'model.layer3.3.conv3',
-         'model.layer3.4.conv3', 'model.layer3.5.conv3'] +
-        ['model.layer4.0.downsample.0', 'model.layer4.1.conv3', 'model.layer4.2.conv3'] +
+        ['model.layer1.0', 'model.layer1.1', 'model.layer1.2'] +
+        ['model.layer2.0', 'model.layer2.1', 'model.layer2.2', 'model.layer2.3'] +
+        ['model.layer3.0', 'model.layer3.1', 'model.layer3.2', 'model.layer3.3',
+         'model.layer3.4', 'model.layer3.5'] +
+        ['model.layer4.0', 'model.layer4.1', 'model.layer4.2'] +
         ['model.avgpool'],
     'voneresnet-50-robust':
         ['vone_block'] +
-        ['model.layer1.0.conv3', 'model.layer1.1.conv3', 'model.layer1.2.conv3'] +
-        ['model.layer2.0.downsample.0', 'model.layer2.1.conv3', 'model.layer2.2.conv3', 'model.layer2.3.conv3'] +
-        ['model.layer3.0.downsample.0', 'model.layer3.1.conv3', 'model.layer3.2.conv3', 'model.layer3.3.conv3',
-         'model.layer3.4.conv3', 'model.layer3.5.conv3'] +
-        ['model.layer4.0.downsample.0', 'model.layer4.1.conv3', 'model.layer4.2.conv3'] +
+        ['model.layer1.0', 'model.layer1.1', 'model.layer1.2'] +
+        ['model.layer2.0', 'model.layer2.1', 'model.layer2.2', 'model.layer2.3'] +
+        ['model.layer3.0', 'model.layer3.1', 'model.layer3.2', 'model.layer3.3',
+         'model.layer3.4', 'model.layer3.5'] +
+        ['model.layer4.0', 'model.layer4.1', 'model.layer4.2'] +
         ['model.avgpool'],
-    'resnet-50-robust':
-        ['conv1'] +
-        ['layer1.0.conv3', 'layer1.1.conv3', 'layer1.2.conv3'] +
-        ['layer2.0.downsample.0', 'layer2.1.conv3', 'layer2.2.conv3', 'layer2.3.conv3'] +
-        ['layer3.0.downsample.0', 'layer3.1.conv3', 'layer3.2.conv3', 'layer3.3.conv3',
-         'layer3.4.conv3', 'layer3.5.conv3'] +
-        ['layer4.0.downsample.0', 'layer4.1.conv3', 'layer4.2.conv3'] +
-        ['avgpool'],
-    'resnet-50-pytorch':
-        ['conv1'] +
-        ['layer1.0.conv3', 'layer1.1.conv3', 'layer1.2.conv3'] +
-        ['layer2.0.downsample.0', 'layer2.1.conv3', 'layer2.2.conv3', 'layer2.3.conv3'] +
-        ['layer3.0.downsample.0', 'layer3.1.conv3', 'layer3.2.conv3', 'layer3.3.conv3',
-         'layer3.4.conv3', 'layer3.5.conv3'] +
-        ['layer4.0.downsample.0', 'layer4.1.conv3', 'layer4.2.conv3'] +
-        ['avgpool'],
+    'densenet-121-pt':
+        ['features.relu0', 'features.pool0'] +
+        [f'features.denseblock1.denselayer{i + 1}.relu1' for i in range(6)] +
+        ['features.transition1.relu'] +
+        [f'features.denseblock2.denselayer{i + 1}.relu1' for i in range(12)] +
+        ['features.transition2.relu'] +
+        [f'features.denseblock3.denselayer{i + 1}.relu1' for i in range(24)] +
+        ['features.transition3.relu'] +
+        [f'features.denseblock4.denselayer{i + 1}.relu1' for i in range(16)],
+    'densenet-169-pt':
+        ['features.relu0', 'features.pool0'] +
+        [f'features.denseblock1.denselayer{i + 1}.relu1' for i in range(6)] +
+        ['features.transition1.relu'] +
+        [f'features.denseblock2.denselayer{i + 1}.relu1' for i in range(12)] +
+        ['features.transition2.relu'] +
+        [f'features.denseblock3.denselayer{i + 1}.relu1' for i in range(32)] +
+        ['features.transition3.relu'] +
+        [f'features.denseblock4.denselayer{i + 1}.relu1' for i in range(32)],
+    'densenet-201-pt':
+        ['features.relu0', 'features.pool0'] +
+        [f'features.denseblock1.denselayer{i + 1}.relu1' for i in range(6)] +
+        ['features.transition1.relu'] +
+        [f'features.denseblock2.denselayer{i + 1}.relu1' for i in range(12)] +
+        ['features.transition2.relu'] +
+        [f'features.denseblock3.denselayer{i + 1}.relu1' for i in range(48)] +
+        ['features.transition3.relu'] +
+        [f'features.denseblock4.denselayer{i + 1}.relu1' for i in range(32)],
+    'densenet-161-pt':
+        ['features.relu0', 'features.pool0'] +
+        [f'features.denseblock1.denselayer{i + 1}.relu2' for i in range(6)] +
+        ['features.transition1.relu'] +
+        [f'features.denseblock2.denselayer{i + 1}.relu2' for i in range(12)] +
+        ['features.transition2.relu'] +
+        [f'features.denseblock3.denselayer{i + 1}.relu2' for i in range(36)] +
+        ['features.transition3.relu'] +
+        [f'features.denseblock4.denselayer{i + 1}.relu2' for i in range(24)],
     # Slim
     'inception_v1':
         ['MaxPool_2a_3x3'] +
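For reference, every string in the new entries above is intended to name a submodule of the corresponding torchvision model, so the definitions can be checked mechanically. A small verification sketch, assuming it runs where `layers` and `resnet_pt_layers` from this module are in scope (the model/entry pairing shown is illustrative):

    import torchvision.models as models

    def check_layer_names(model, layer_names):
        # every requested layer must be a named submodule of the model
        available = {name for name, _ in model.named_modules()}
        missing = [name for name in layer_names if name not in available]
        assert not missing, f"unknown layers: {missing}"

    check_layer_names(models.resnet18(), resnet_pt_layers([2, 2, 2, 2]))
    check_layer_names(models.densenet121(), layers['densenet-121-pt'])
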
@@ -251,13 +280,6 @@ def prednet():
 
 model_layers = ModelLayers(layers)
 model_layers['vggface'] = model_layers['vgg-16']
-for sin_model in ['resnet50-SIN', 'resnet50-SIN_IN', 'resnet50-SIN_IN_IN']:
-    model_layers[sin_model] = \
-        ['conv1'] + \
-        [f'layer{seq}.{bottleneck}.relu'
-         for seq, bottlenecks in enumerate([3, 4, 6, 3], start=1)
-         for bottleneck in range(bottlenecks)] + \
-        ['avgpool']
 
 for version, multiplier, image_size in [
     # v1
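For reference, replacing readouts such as 'layer1.0.conv3' and 'layer2.0.downsample.0' with whole-block names like 'layer1.0' means the recorded activation is the block output after the residual addition and final ReLU, not a pre-addition convolution. model_tools handles the hooking internally; the sketch below only illustrates what a forward hook on one of the new names sees:

    import torch
    import torchvision.models as models

    model = models.resnet50().eval()
    activations = {}

    def save_output(name):
        def hook(module, inputs, output):
            activations[name] = output.detach()
        return hook

    # 'layer1.0' is a whole Bottleneck block: its output is post-residual, post-ReLU
    dict(model.named_modules())['layer1.0'].register_forward_hook(save_output('layer1.0'))
    with torch.no_grad():
        model(torch.rand(1, 3, 224, 224))
    print(activations['layer1.0'].shape)  # torch.Size([1, 256, 56, 56])

From 4ea29037c9216c3892d63a478e6088c572ca00c4 Mon Sep 17 00:00:00 2001
From: Joel Dapello
Date: Tue, 2 Mar 2021 12:37:08 -0500
Subject: [PATCH 2/2] add a few more layers to alexnet

---
 .../model_commitments/model_layer_def.py | 39 +++++++++++++------
 1 file changed, 27 insertions(+), 12 deletions(-)

diff --git a/candidate_models/model_commitments/model_layer_def.py b/candidate_models/model_commitments/model_layer_def.py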
index 04f13ab..92757e2 100644
--- a/candidate_models/model_commitments/model_layer_def.py
+++ b/candidate_models/model_commitments/model_layer_def.py
@@ -73,20 +73,35 @@ def prednet():
 
 
 layers = {
-    'alexnet':
-        [  # conv-relu-[pool]{1,2,3,4,5}
-            'features.2', 'features.5', 'features.7', 'features.9', 'features.12',
-            'classifier.2', 'classifier.5'],  # fc-[relu]{6,7,8}
+    'alexnet':
+        [f'features.{i}' for i in [1,2,4,5,7,9,11,12]] +
+        [f'classifier.{i}' for i in [2,5]],  # fc-[relu]{6,7,8}
     'vgg-16': [f'block{i + 1}_pool' for i in range(5)] + ['fc1', 'fc2'],
     'vgg-19': [f'block{i + 1}_pool' for i in range(5)] + ['fc1', 'fc2'],
-    'vgg-11-pt': [f'features.{i}' for i in [1,2,4,5,7,9,10,12,14,15,17,19,20]] + ['classifier.1', 'classifier.4'],
-    'vgg-11-bn-pt': [f'features.{i}' for i in [2,3,6,7,10,13,14,17,20,21,24,27,28]] + ['classifier.1', 'classifier.4'],
-    'vgg-13-pt': [f'features.{i}' for i in [1,3,4,6,8,9,11,13,14,16,18,19,21,23,24]] + ['classifier.1', 'classifier.4'],
-    'vgg-13-bn-pt': [f'features.{i}' for i in [2,5,6,9,12,13,16,19,20,23,26,27,30,33,34]] + ['classifier.1', 'classifier.4'],
-    'vgg-16-pt': [f'features.{i}' for i in [1,3,4,6,8,9,11,13,15,16,18,20,21,23,25,27,29,30]] + ['classifier.1', 'classifier.4'],
-    'vgg-16-bn-pt': [f'features.{i}' for i in [2,5,6,9,12,13,16,19,22,23,26,29,32,33,36,39,42,43]] + ['classifier.1', 'classifier.4'],
-    'vgg-19-pt': [f'features.{i}' for i in [1,3,4,6,8,9,11,13,15,17,18,20,22,24,26,27,29,31,33,35,36]] + ['classifier.1', 'classifier.4'],
-    'vgg-19-bn-pt': [f'features.{i}' for i in [2,5,6,9,12,13,16,19,22,25,26,29,32,35,38,39,42,45,48,51,52]] + ['classifier.1', 'classifier.4'],
+    'vgg-11-pt':
+        [f'features.{i}' for i in [1,2,4,5,7,9,10,12,14,15,17,19,20]] +
+        ['classifier.1', 'classifier.4'],
+    'vgg-11-bn-pt':
+        [f'features.{i}' for i in [2,3,6,7,10,13,14,17,20,21,24,27,28]] +
+        ['classifier.1', 'classifier.4'],
+    'vgg-13-pt':
+        [f'features.{i}' for i in [1,3,4,6,8,9,11,13,14,16,18,19,21,23,24]] +
+        ['classifier.1', 'classifier.4'],
+    'vgg-13-bn-pt':
+        [f'features.{i}' for i in [2,5,6,9,12,13,16,19,20,23,26,27,30,33,34]] +
+        ['classifier.1', 'classifier.4'],
+    'vgg-16-pt':
+        [f'features.{i}' for i in [1,3,4,6,8,9,11,13,15,16,18,20,21,23,25,27,29,30]] +
+        ['classifier.1', 'classifier.4'],
+    'vgg-16-bn-pt':
+        [f'features.{i}' for i in [2,5,6,9,12,13,16,19,22,23,26,29,32,33,36,39,42,43]] +
+        ['classifier.1', 'classifier.4'],
+    'vgg-19-pt':
+        [f'features.{i}' for i in [1,3,4,6,8,9,11,13,15,17,18,20,22,24,26,27,29,31,33,35,36]] +
+        ['classifier.1', 'classifier.4'],
+    'vgg-19-bn-pt':
+        [f'features.{i}' for i in [2,5,6,9,12,13,16,19,22,25,26,29,32,35,38,39,42,45,48,51,52]] +
+        ['classifier.1', 'classifier.4'],
     'squeezenet1_0':
         ['features.' + layer for layer in
          # max pool + fire outputs (ignoring pools)
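For reference, the integer indices in the reformatted alexnet and vgg-*-pt entries refer to positions inside torchvision's `features` and `classifier` Sequential containers (mostly ReLU and MaxPool modules). They can be inspected directly; vgg16 is used here only as an example:

    import torchvision.models as models

    vgg16 = models.vgg16()
    for i, module in enumerate(vgg16.features):
        print(f'features.{i}: {type(module).__name__}')
    # e.g. features.1 -> ReLU, features.4 -> MaxPool2d, features.30 -> MaxPool2d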