repo (string, 2–99 chars) | file (string, 13–225 chars) | code (string, 0–18.3M chars) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 class: py)
|---|---|---|---|---|---|---|
deficient-efficient | deficient-efficient-master/load_wrn50_2.py | import re
import torch
import torch.nn.functional as F
from torch.utils import model_zoo
from models.blocks import Conv
from models.wide_resnet import WRN_50_2
from collections import OrderedDict
def all_equal(iterable_1, iterable_2):
return all([x == y for x,y in zip(iterable_1, iterable_2)])
# functional model... | 4,000 | 36.046296 | 138 | py |
deficient-efficient | deficient-efficient-master/imagenet_experiments.py | import json
#settings = ['ACDC_%i'%n for n in [12, 28]] +\
# ['SepHashed_%.2f'%s for s in [0.08, 0.58]] +\
settings = ['Generic_%.2f'%s for s in [0.03, 0.21]] +\
['Tucker_%.2f'%s for s in [0.25, 0.73]] +\
['TensorTrain_%.2f'%s for s in [0.27, 0.75]] +\
['Shuffle_%i'%n for n i... | 1,203 | 36.625 | 77 | py |
deficient-efficient | deficient-efficient-master/collate_results.py | # open the schedule json, then search for which machines the longest-progressed
# job has run on
import json
import sys
import os
import torch
import subprocess
from subprocess import PIPE
from collections import OrderedDict
from funcs import what_conv_block
from models.wide_resnet import WideResNet, WRN_50_2
from models.... | 5,460 | 37.730496 | 125 | py |
deficient-efficient | deficient-efficient-master/history.py | # opens checkpoints and prints the commands used to run each
import torch
import os
import argparse
parser = argparse.ArgumentParser(description='Inspect saved checkpoints')
parser.add_argument('--match', type=str, default=None, help='Filter checkpoints by keyword.')
if __name__ == '__main__':
args = parser.parse... | 766 | 33.863636 | 113 | py |
deficient-efficient | deficient-efficient-master/models/resnet.py | '''This is a rewriting of the native resnet definition that comes with Pytorch, to allow it to use our blocks and
convolutions for imagenet experiments. Annoyingly, the pre-trained models don't use pre-activation blocks.'''
import torch
import torch.nn as nn
import math
import torchvision.models.resnet
import torch.u... | 6,623 | 34.047619 | 120 | py |
deficient-efficient | deficient-efficient-master/models/hashed.py | # HashedNet Convolutional Layer: https://arxiv.org/abs/1504.04788
from functools import reduce
import torch
import torch.nn as nn
import torch.nn.functional as F
class HashedConv2d(nn.Conv2d):
"""Conv2d with the weights of the convolutional filters parameterised using
a budgeted subset of parameters and rand... | 5,827 | 48.811966 | 176 | py |
deficient-efficient | deficient-efficient-master/models/darts.py | # DARTS network definition
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.checkpoint import checkpoint
from collections import namedtuple
from .blocks import DepthwiseSep
from .wide_resnet import group_lowrank, compres... | 11,450 | 32.979228 | 429 | py |
deficient-efficient | deficient-efficient-master/models/wide_resnet.py | # network definition
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
# wildcard import for legacy reasons
if __name__ == '__main__':
from blocks import *
else:
from .blocks import *
def parse_options(convty... | 10,717 | 36.872792 | 106 | py |
deficient-efficient | deficient-efficient-master/models/__init__.py | from .wide_resnet import *
#from .resnet import *
| 50 | 16 | 26 | py |
deficient-efficient | deficient-efficient-master/models/decomposed.py | # Substitute layer explicitly decomposing the tensors in convolutional layers
# All implemented using tntorch: https://github.com/rballester/tntorch
# All also use a separable design: the low-rank approximate pointwise
# convolution is preceded by a grouped convolution
import math
import torch
import torch.nn as nn
imp... | 8,252 | 39.856436 | 99 | py |
deficient-efficient | deficient-efficient-master/models/MobileNetV2.py | import torch
import torch.nn as nn
import math
# wildcard import for legacy reasons
if __name__ == '__main__':
import sys
sys.path.append("..")
from models.blocks import *
from models.wide_resnet import compression, group_lowrank
# only used in the first convolution, which we do not substitute by convention
... | 8,316 | 33.086066 | 118 | py |
deficient-efficient | deficient-efficient-master/models/blocks.py | # blocks and convolution definitions
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
if __name__ == 'blocks' or __name__ == '__main__':
from hashed import HashedConv2d, HalfHashe... | 18,941 | 43.992874 | 120 | py |
ACRO | ACRO-main/setup.py | """Python setup script for installing ACRO."""
from pathlib import Path
from setuptools import find_packages, setup
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
setup(
name="acro",
version="0.4.2",
license="MIT",
maintainer="Jim Smith",
mai... | 1,427 | 31.454545 | 86 | py |
ACRO | ACRO-main/test/stata.py | #!/usr/bin/env python
"""ACRO Stata Tests."""
# ACRO Tests
import os
import pandas as pd
from acro import ACRO, add_constant
# Instantiate ACRO
acro = ACRO()
# Load test data
path = os.path.join("../data", "test_data.dta")
df = pd.read_stata(path)
df.head()
# Pandas crosstab
table = pd.crosstab(df.year, df.gr... | 2,056 | 18.778846 | 88 | py |
ACRO | ACRO-main/test/test_initial.py | """This module contains unit tests."""
import json
import os
import numpy as np
import pandas as pd
import pytest
from acro import ACRO, add_constant, record, utils
from acro.record import Records, load_records
# pylint: disable=redefined-outer-name
PATH: str = "RES_PYTEST"
@pytest.fixture
def data() -> pd.DataF... | 14,281 | 34.527363 | 86 | py |
ACRO | ACRO-main/test/test_stata_interface.py | """This module contains unit tests for the stata interface."""
import os
import pandas as pd
import pytest
from acro import ACRO
from stata.acro_stata_parser import (
apply_stata_expstmt,
apply_stata_ifstmt,
find_brace_contents,
parse_and_run,
parse_table_details,
)
# pylint: disable=redefined-o... | 9,013 | 29.764505 | 88 | py |
ACRO | ACRO-main/test/__init__.py | 0 | 0 | 0 | py | |
ACRO | ACRO-main/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# -- Path setup --------------------------------------------------------------
import os
import sys
sys.path.insert(0, os.path.abspath("../../"))
from acro.version import __version__
# -- Project information -----------------------------------------------... | 1,120 | 23.369565 | 78 | py |
ACRO | ACRO-main/acro/record.py | """ACRO: Output storage and serialization."""
import datetime
import hashlib
import json
import logging
import os
import shutil
from pathlib import Path
from typing import Any
import pandas as pd
from pandas import DataFrame
from .version import __version__
logger = logging.getLogger("acro:records")
def load_outc... | 19,460 | 32.209898 | 86 | py |
ACRO | ACRO-main/acro/utils.py | """ACRO: Utility Functions."""
import logging
from collections.abc import Callable
from inspect import FrameInfo, getframeinfo
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from statsmodels.iolib.table import SimpleTable
logger = logging.getLogger("acro")
AGGFUNC: dict[str, Callable] ... | 10,320 | 27.991573 | 87 | py |
ACRO | ACRO-main/acro/acro.py | """ACRO: Automatic Checking of Research Outputs."""
import json
import logging
import os
import pathlib
import warnings
from collections.abc import Callable
from inspect import stack
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
import yaml
from pandas import DataFrame
from st... | 32,438 | 36.243398 | 86 | py |
ACRO | ACRO-main/acro/version.py | """ACRO version number."""
__version__ = "0.4.2"
| 49 | 15.666667 | 26 | py |
ACRO | ACRO-main/acro/__init__.py | """ACRO."""
from .acro import *
| 32 | 10 | 19 | py |
ACRO | ACRO-main/stata/acro_stata_parser.py | # file with commands to manage the stata-acro interface
import pandas as pd
from acro import ACRO, add_constant
def apply_stata_ifstmt(raw: str, df: pd.DataFrame) -> pd.DataFrame:
if len(raw) == 0:
return df
else:
        # add braces around each clause, keeping any in the original
raw = "( "... | 7,197 | 30.432314 | 87 | py |
ACRO | ACRO-main/notebooks/test-nursery.py | """
ACRO Tests
Copyright : Maha Albashir, Richard Preen, Jim Smith 2023.
"""
# import libraries
import os
import numpy as np
import pandas as pd
from scipy.io.arff import loadarff
from acro import ACRO, add_constant
# Instantiate ACRO by making an acro object
print(
"\n Creating an acro object().\n"
"The TR... | 9,954 | 32.518519 | 95 | py |
Dcm2Bids | Dcm2Bids-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
description = """Reorganising NIfTI files from dcm2niix into the Brain Imaging Data Structure"""
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
long_description = open('README.md').read()
import glo... | 1,007 | 24.2 | 96 | py |
Dcm2Bids | Dcm2Bids-master/dcm2bids/structure.py | # -*- coding: utf-8 -*-
import os
class Participant(object):
"""
"""
def __init__(self, name, session=None):
self._name = name
self._session = session
@property
def name(self):
return "sub-{}".format(self._name)
@property
def session(self):
if self._ses... | 1,347 | 19.738462 | 64 | py |
Dcm2Bids | Dcm2Bids-master/dcm2bids/dcm2niix.py | # -*- coding: utf-8 -*-
import glob
import os
from subprocess import call
from collections import OrderedDict
import re
from .utils import clean
def sidecar2meta(carfile):
"""extract series number and potential
suffixes (reflecting e.g. separate images for each echo) from the dcm2niix
sidecar file name"... | 1,932 | 28.738462 | 84 | py |
Dcm2Bids | Dcm2Bids-master/dcm2bids/dcm2bids.py | # -*- coding: utf-8 -*-
import glob
import os
import datetime
import logging
from collections import OrderedDict
from .dcm2niix import Dcm2niix
from .sidecarparser import Sidecarparser
from .structure import Participant
from .utils import (
load_json,
make_directory_tree,
splitext_,
save_json,
wri... | 6,650 | 33.82199 | 87 | py |
Dcm2Bids | Dcm2Bids-master/dcm2bids/utils.py | # -*- coding: utf-8 -*-
import json
import os
import shutil
import csv
from collections import OrderedDict
import sys
def load_json(filename):
with open(filename, "r") as f:
data = json.load(f, strict=False)
return data
def save_json(data, filename):
with open(filename, "w") as f:
json... | 1,632 | 23.014706 | 85 | py |
Dcm2Bids | Dcm2Bids-master/dcm2bids/__init__.py | # -*- coding: utf-8 -*-
__version__ = "0.4.0.1"
| 48 | 15.333333 | 23 | py |
Dcm2Bids | Dcm2Bids-master/dcm2bids/sidecarparser.py | # -*- coding: utf-8 -*-
import itertools
import os
from collections import defaultdict, OrderedDict
from future.utils import iteritems
from .structure import Acquisition
from .utils import load_json, save_json, splitext_
import logging
class Sidecarparser(object):
def __init__(self, sidecars, descriptions, sele... | 7,753 | 38.969072 | 102 | py |
multimodal-vae-public | multimodal-vae-public-master/vision/sample.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from train... | 5,676 | 40.437956 | 96 | py |
multimodal-vae-public | multimodal-vae-public-master/vision/setup.py | """Grayscale, edge detection, and facial landmarks are pre-computed
prior to training. Obscuring and watermarks are done in-place in
datasets.py.
>>> python setup.py grayscale ./data/images ./data/grayscale
>>> python setup.py edge ./data/images ./data/edge
>>> python setup.py mask ./data/images ./data/mask
"""
from... | 6,049 | 35.666667 | 84 | py |
multimodal-vae-public | multimodal-vae-public-master/vision/model.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
class MVAE(nn.Module):
def __init__(self, n_latents=250, use_cuda=False):
sup... | 8,131 | 36.13242 | 82 | py |
multimodal-vae-public | multimodal-vae-public-master/vision/datasets.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import random
import numpy as np
from copy import deepcopy
from PIL import Image
import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
N_MODALITIES = 6
VALID_P... | 4,896 | 36.669231 | 78 | py |
multimodal-vae-public | multimodal-vae-public-master/vision/train.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.utils import save_image
f... | 19,025 | 47.659847 | 107 | py |
multimodal-vae-public | multimodal-vae-public-master/mnist/sample.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
from torchvision.utils import save_image
from train ... | 4,692 | 37.154472 | 82 | py |
multimodal-vae-public | multimodal-vae-public-master/mnist/model.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn.parameter import Parameter
class MVAE(nn.Module):
"""Multimoda... | 5,973 | 31.11828 | 73 | py |
multimodal-vae-public | multimodal-vae-public-master/mnist/train.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from tor... | 10,817 | 39.215613 | 105 | py |
multimodal-vae-public | multimodal-vae-public-master/fashionmnist/sample.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from train import loa... | 4,827 | 37.624 | 82 | py |
multimodal-vae-public | multimodal-vae-public-master/fashionmnist/model.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
# MAP from index to the interpretable label
LABEL_IX_TO_STRING = {0: 'T-shirt/top... | 6,482 | 30.779412 | 82 | py |
multimodal-vae-public | multimodal-vae-public-master/fashionmnist/datasets.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from torchvision.datasets import MNIST
class FashionMNIST(MNIST):
"""`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
Args:
root (string): Root directory of da... | 1,428 | 46.633333 | 96 | py |
multimodal-vae-public | multimodal-vae-public-master/fashionmnist/train.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from mo... | 10,820 | 39.226766 | 105 | py |
multimodal-vae-public | multimodal-vae-public-master/multimnist/sample.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from datasets import ... | 5,196 | 36.121429 | 82 | py |
multimodal-vae-public | multimodal-vae-public-master/multimnist/utils.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import string
import random
import time
import math
import torch
from torch.autograd import Variable
max_length = 4 # max of 4 characters in an image
all_characters = '0123456789'
n_characters = len(all_chara... | 1,417 | 23.877193 | 60 | py |
multimodal-vae-public | multimodal-vae-public-master/multimnist/model.py | """This model will be quite similar to mnist/model.py
except we will need to be slightly fancier in the
encoder/decoders for each modality. Likely, we will need
convolutions/deconvolutions and RNNs.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
imp... | 9,790 | 34.219424 | 83 | py |
multimodal-vae-public | multimodal-vae-public-master/multimnist/datasets.py | """
This script generates a dataset similar to the MultiMNIST dataset
described in [1]. However, we remove any translation.
[1] Eslami, SM Ali, et al. "Attend, infer, repeat: Fast scene
understanding with generative models." Advances in Neural Information
Processing Systems. 2016.
"""
from __future__ import division
... | 13,354 | 37.93586 | 113 | py |
multimodal-vae-public | multimodal-vae-public-master/multimnist/train.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision impo... | 11,314 | 39.555556 | 105 | py |
multimodal-vae-public | multimodal-vae-public-master/celeba/sample.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision.utils import save_image
from train import load_checkpoin... | 5,535 | 38.542857 | 82 | py |
multimodal-vae-public | multimodal-vae-public-master/celeba/model.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from datasets import N_ATTRS
class MVAE(nn.Module):
"""Multimodal Variational Autoencoder.
... | 7,415 | 31.243478 | 74 | py |
multimodal-vae-public | multimodal-vae-public-master/celeba/datasets.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import copy
import random
import numpy as np
import numpy.random as npr
from PIL import Image
from random import shuffle
from scipy.misc import imresize
import torch
from torch.utils.data.... | 6,170 | 39.333333 | 111 | py |
multimodal-vae-public | multimodal-vae-public-master/celeba/train.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision impo... | 11,037 | 40.340824 | 105 | py |
multimodal-vae-public | multimodal-vae-public-master/celeba19/model.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
sys.path.append('../celeba')
from datasets import N_ATTRS
class MVAE(nn.Module):
"""... | 8,328 | 32.316 | 91 | py |
multimodal-vae-public | multimodal-vae-public-master/celeba19/train.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import numpy as np
from tqdm import tqdm
from itertools import combinations
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
fro... | 14,718 | 40.345506 | 129 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/main.py | from pytorch_lightning.callbacks import ModelCheckpoint
import pytorch_lightning as pl
import yaml
import argparse
import utilities
import os
import torch
import shutil
def datasetFactory(config, do, args=None):
c_data =config["data"]
if args is None:
gl = utilities.GettingLists(data_for_training=c_da... | 10,447 | 45.435556 | 153 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/reconstruction_data.py | from main import choosing_model
import yaml
import argparse
import utilities
import os
import torch
import pytorch_lightning as pl
import numpy as np
import matplotlib.pyplot as plt
from utilities import to_numpy
def saving_files(x, y, out, database, name):
PATH = "make_graph/data"+'/'+database+'/'+name
x = ... | 2,493 | 32.253333 | 94 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/reconstruction_plot.py | from main import choosing_model
import yaml
import argparse
import utilities
import os
import torch
import pytorch_lightning as pl
import numpy as np
import matplotlib.pyplot as plt
from utilities import to_numpy
def plotting(in_, NN_out, out, name, database,
k_list =[1,2,3,4], save=False, vmin=-0.5, vma... | 4,950 | 35.138686 | 94 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/OOD.py | import yaml
from evaluation import saving_files
import argparse
import utilities
from utilities import to_numpy
import os
import torch
import pytorch_lightning as pl
import numpy as np
import matplotlib.pyplot as plt
def load_ood(arg, size = 64, dir_skeleton= None):
if dir_skeleton is None:
dir_skeleton... | 5,428 | 42.087302 | 100 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/evaluation.py | import yaml
import argparse
import utilities
import os
import torch
import numpy as np
from main import datasetFactory
import pytorch_lightning as pl
def saving_files(data, database, name, dir_= "make_graph"):
if len(data) != 1:
PATH = os.path.join(dir_, "test_loss", database)
if not os.path.exi... | 3,492 | 36.159574 | 98 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/sFNO_epsilon_v2.py | import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, FC_nn
from timm.models.layers import DropPath, trunc_normal_
import torch.nn.functional as F
from utilities import LpLoss
from .sFNO import IO_layer
################... | 10,276 | 35.967626 | 114 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/FNO_residual.py | import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, FC_nn, set_activ
import torch.nn.functional as F
from utilities import LpLoss
from timm.models.layers import DropPath
#######################################
# Integ... | 5,887 | 34.46988 | 109 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/basics_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
##########################################
# Fully connected Layer
##########################################
class FCLayer(nn.Module):
"""Fully connected layer """
def __init__(self, in_feature, out_feature,
... | 6,354 | 38.228395 | 94 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/FNO.py | import pytorch_lightning as pl
import torch
from torch import optim, nn
from .basics_model import get_grid2D, set_activ, FC_nn
from utilities import LpLoss
#######################################
# Fourier Convolution,
# \int_D k(x-y) v(y) dy
# = \mathcal{F}^{-1}(P \mathcal{F}(v))
###################################... | 6,612 | 39.078788 | 119 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/sFNO_epsilon_v1.py | import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, FC_nn, set_activ
import torch.nn.functional as F
from utilities import LpLoss
from timm.models.layers import DropPath
#######################################
# Integ... | 6,482 | 35.627119 | 110 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/sFNO.py | import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, FC_nn, set_activ
import torch.nn.functional as F
from utilities import LpLoss
#######################################
# Integral Operator Layer
#####################... | 5,862 | 35.64375 | 110 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/sFNO_epsilon_v2_updated.py | import pytorch_lightning as pl
import torch
from torch import optim, nn
from .FNO import fourier_conv_2d
from .basics_model import LayerNorm, get_grid2D, set_activ, GroupNorm
import torch.nn.functional as F
from utilities import LpLoss
from timm.models.layers import DropPath, trunc_normal_
import os
from .sFNO_epsilon_... | 10,270 | 38.35249 | 112 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/models/__init__.py | from .basics_model import *
from .FNO import FNO
from .FNO_residual import FNO_residual
from .sFNO_epsilon_v2 import sFNO_epsilon_v2, sFNO_epsilon_v2_proj
from .sFNO import sFNO
from .sFNO_epsilon_v1 import sFNO_epsilon_v1
from .sFNO_epsilon_v2_updated import sFNO_epsilon_v2_updated | 283 | 39.571429 | 66 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/make_graph/make_box_plots.py | import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser('Freq', add_help=False)
parser.add_argument('-f','--freq', type=int,
default=7)
parser.add_argument('-min','--min', type=fl... | 1,441 | 31.772727 | 73 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/utilities/model_factory.py | from models import *
def choosing_model(config):
c_nn = config["model"]
c_train = config["train"]
# 7 Hz data only contains the real part of the field
if config["Project"]["database"]=='GRF_7Hz':
if config["Project"]["name"] == "FNO":
model =FNO(
wavenum... | 7,309 | 42.254438 | 84 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/utilities/loss.py | import torch
#loss function with rel/abs Lp loss
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
        #Dimension and Lp-norm type are positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reductio... | 1,326 | 27.234043 | 113 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/utilities/loading_data.py | import numpy as np
import torch
from bisect import bisect
import os
from torch.utils.data import Dataset, DataLoader
def to_numpy(x):
return x.detach().cpu().numpy()
#files Loader
def MyLoader(GL, do = "train", config = None, args=None):
if config is not None:
batch_size = config['train']['batchsize']
w... | 6,671 | 43.18543 | 158 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/utilities/__init__.py | from .loading_data import *
from .loss import LpLoss
from .model_factory import choosing_model
from .plotting_data import *
from .saving_npy_output import * | 156 | 30.4 | 41 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/utilities/plotting_data.py | import matplotlib.pyplot as plt
from .loading_data import to_numpy
import os
def plotting(in_, NN_out, out, name, database, PATH,
list_to_plot = None, vmin=-0.5, vmax =0.5,
shrink = 0.8, ksample = 0):
if list_to_plot is None:
list_to_plot = [0,1,2,3,4,5]
print("list_to_pl... | 2,069 | 39.588235 | 104 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/utilities/saving_npy_output.py | import numpy as np
import os
from .loading_data import to_numpy
def saving_files(in_files, out_files, NN_out_files, NN_name, database, PATH, realization_k):
"""
Saving the files in the directory OOD/database/realization_k
"""
saving_dir = f'{PATH}/{database}/realization_{realization_k}'
if not o... | 896 | 39.772727 | 94 | py |
Fine-tuning-NOs | Fine-tuning-NOs-master/visualization_code/._create_trajectory.py |
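Every row above shares the schema in the header: one Python source file per row, with three statistics derived from the raw `code` string. A minimal sketch of recomputing those columns and filtering rows, assuming the split is stored as JSON Lines at a hypothetical path `train.jsonl` (the actual storage format and file name are not shown above):

```python
# Minimal sketch, not the dataset's own tooling. Assumptions: rows are stored
# one JSON object per line in a hypothetical "train.jsonl", and the statistics
# columns are plain character counts over the "code" field.
import json

def file_stats(code: str) -> dict:
    """Recompute file_length / avg_line_length / max_line_length for one row."""
    lengths = [len(line) for line in code.split("\n")]  # never empty: "" -> [""]
    return {
        "file_length": len(code),                        # total characters
        "avg_line_length": sum(lengths) / len(lengths),  # mean characters per line
        "max_line_length": max(lengths),                 # longest single line
    }

with open("train.jsonl") as f:  # hypothetical file name
    for row in map(json.loads, f):
        stats = file_stats(row["code"])
        # Example filter: drop files containing extreme lines (minified or
        # embedded-data files), which is what max_line_length is useful for.
        if row["extension_type"] == "py" and stats["max_line_length"] < 1000:
            print(row["repo"], row["file"], stats)
```

As a sanity check, this computation reproduces the `ACRO-main/acro/version.py` row above exactly (49 / 15.666667 / 26), with the empty string after the trailing newline counted as a final zero-length line. The header's upper bounds (code up to 18.3M chars, lines up to 4.26M chars) indicate the corpus includes very large or minified files, which such a filter would exclude.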