repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string)
|---|---|---|---|---|---|---|
FPI | FPI-master/models/__init__.py | | 0 | 0 | 0 | py |
WD-selection-function | WD-selection-function-main/notebooks/config.py | font = {
'family': 'monospace',
'color': 'black',
'weight': 'bold',
'size': 20,
}
cmap_r = 'inferno_r'
cmap = 'inferno'
style = {
'font.family': 'serif',
'font.size': 22,
"axes.titlesize": "medium",
"axes.labelsize": "medium",
'axes.edgecolor': '#000000',
"xtick.direction": "o... | 788 | 21.542857 | 32 | py |
RefactorF4Acc | RefactorF4Acc-master/Parser/Combinators.py | #package Parser::Combinators
import types
import re
#
# (c) 2017 Wim Vanderbauwhede
#
VERSION = '0.06'
__all__ =[
'debug',
'apply',
# 'show',
'sequence',
'commaSep',
'choice',
'tryParse',
'maybe',
'regex',
'parens',
'brackets',
'angleBrackets',
'braces',
'char',
'sepBy',
'sepByChar',
'oneOf',... | 26,587 | 31.824691 | 136 | py |
RefactorF4Acc | RefactorF4Acc-master/Parser/test_Combinators.py | from Combinators import *
str51 = 'debug'
str52 = 'debug) '
print('*** create parser ***')
dim_parser2 = {'Dim' : debug } # parens( word ) }
print('*** apply parser ***')
res51 =apply( dim_parser2,str51)
print( '** parser after application: ',dim_parser2)
print('str51: ',res51)
print('*** apply parser again ***')
... | 3,671 | 19.982857 | 89 | py |
Cingulata | Cingulata-master/optim/utils.py | #
# (C) Copyright 2017 CEA LIST. All Rights Reserved.
# Contributor(s): Cingulata team (formerly Armadillo team)
#
# This software is governed by the CeCILL-C license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software un... | 3,978 | 26.631944 | 102 | py |
Cingulata | Cingulata-master/optim/graph_info.py | #
# (C) Copyright 2017 CEA LIST. All Rights Reserved.
# Contributor(s): Cingulata team (formerly Armadillo team)
#
# This software is governed by the CeCILL-C license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software un... | 4,014 | 34.848214 | 111 | py |
tesstrain | tesstrain-main/generate_line_box.py | #!/usr/bin/env python3
import argparse
import io
import unicodedata
from PIL import Image
#
# command line arguments
#
arg_parser = argparse.ArgumentParser('''Creates tesseract box files for given (line) image text pairs''')
# Text ground truth
arg_parser.add_argument('-t', '--txt', nargs='?', metavar='TXT', help='L... | 1,365 | 28.695652 | 126 | py |
tesstrain | tesstrain-main/shuffle.py | #!/usr/bin/env python3
# shuffle.py - shuffle lines in pseudo random order
#
# Usage:
# shuffle.py [SEED [FILE]]
#
# Sort and shuffle the lines read from stdin in pseudo random order
# and write them to stdout.
#
# If FILE is given, then apply to that in-place (instead of stdin and stdout).
#
# The optional SEED... | 1,031 | 22.454545 | 78 | py |
tesstrain | tesstrain-main/generate_wordstr_box.py | #!/usr/bin/env python3
import argparse
import io
import unicodedata
import bidi.algorithm
from PIL import Image
#
# command line arguments
#
arg_parser = argparse.ArgumentParser('''Creates tesseract WordStr box files for given (line) image text pairs''')
# Text ground truth
arg_parser.add_argument('-t', '--txt', na... | 1,160 | 25.386364 | 126 | py |
tesstrain | tesstrain-main/generate_gt_from_box.py | #!/usr/bin/env python3
import argparse
import io
#
# command line arguments
#
arg_parser = argparse.ArgumentParser(
'''Creates groundtruth files from text2image generated box files''')
# Text ground truth
arg_parser.add_argument('-t', '--txt', nargs='?',
metavar='TXT', help='Line text (GT... | 967 | 28.333333 | 109 | py |
tesstrain | tesstrain-main/generate_line_syllable_box.py | #!/usr/bin/env python3
import argparse
import io
import unicodedata
from PIL import Image
#
# command line arguments
#
arg_parser = argparse.ArgumentParser('''Creates tesseract box files for given (line) image text pairs''')
# Text ground truth
arg_parser.add_argument('-t', '--txt', nargs='?', metavar='TXT', help='L... | 2,137 | 29.985507 | 126 | py |
tesstrain | tesstrain-main/normalize.py | #!/usr/bin/env python3
import argparse
import io
import unicodedata
# Command line arguments.
arg_parser = argparse.ArgumentParser(description='Normalize all ground truth texts for the given text files.')
arg_parser.add_argument("filename", help="filename of text file", nargs='*')
arg_parser.add_argument("-n", "--dry... | 1,286 | 40.516129 | 136 | py |
tesstrain | tesstrain-main/src/setup.py | from pathlib import Path
import setuptools
ROOT_DIRECTORY = Path(__file__).parent.resolve()
setuptools.setup(
name='tesstrain',
description='Training utils for Tesseract',
long_description=(ROOT_DIRECTORY / 'README.md').read_text(encoding='utf-8'),
long_description_content_type='text/markdown',
... | 1,308 | 30.166667 | 80 | py |
tesstrain | tesstrain-main/src/tesstrain/__main__.py | # (C) Copyright 2014, Google Inc.
# (C) Copyright 2018, James R Barlow
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicab... | 2,242 | 28.12987 | 74 | py |
tesstrain | tesstrain-main/src/tesstrain/language_specific.py | # (C) Copyright 2014, Google Inc.
# (C) Copyright 2018, James R Barlow
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicab... | 37,240 | 25.487198 | 88 | py |
tesstrain | tesstrain-main/src/tesstrain/arguments.py | # (C) Copyright 2014, Google Inc.
# (C) Copyright 2018, James R Barlow
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicab... | 8,597 | 33.95122 | 115 | py |
tesstrain | tesstrain-main/src/tesstrain/generate.py | # (C) Copyright 2014, Google Inc.
# (C) Copyright 2018, James R Barlow
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicab... | 12,537 | 32.257294 | 95 | py |
tesstrain | tesstrain-main/src/tesstrain/wrapper.py | # (C) Copyright 2014, Google Inc.
# (C) Copyright 2018, James R Barlow
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicab... | 5,445 | 40.257576 | 87 | py |
tesstrain | tesstrain-main/src/tesstrain/__init__.py | # (C) Copyright 2014, Google Inc.
# (C) Copyright 2018, James R Barlow
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicab... | 663 | 40.5 | 74 | py |
tesstrain | tesstrain-main/plot/plot_cer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
maxticks=10
dataframe = pd.read_csv("plot_cer.csv",sep='\t', encoding='utf-8')
dataframe['TrainingIteration'] = dataframe['TrainingIteration'].fillna(-2)
dataframe['TrainingIteration'] = dataframe['Tr... | 2,625 | 37.617647 | 110 | py |
tesstrain | tesstrain-main/plot/plot_cer_validation.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
maxticks=10
dataframe = pd.read_csv("plot_cer_validation.csv",sep='\t', encoding='utf-8')
t = dataframe['TrainingIteration']
x = dataframe['LearningIteration']
v = dataframe.ValidationCER
c = datafram... | 2,360 | 34.238806 | 90 | py |
Age-and-Gender-Recognition | Age-and-Gender-Recognition-main/Age and Gender Recognition using Caffe Model - Youtube.py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import cv2
import os
os.chdir('D:\Python37\Projects\Gender-and-Age-Detection- Youtube\Gender-and-Age-Detection\models')
# In[33]:
def detectFace(net,frame,confidence_threshold=0.7):
frameOpencvDNN=frame.copy()
print(frameOpencvDNN.shape)
frameHeight=fram... | 2,680 | 29.123596 | 154 | py |
linbp-attack | linbp-attack-master/attack/imagenet/test.py | import os, sys
import torch
import models as MODEL
import torchvision.transforms as T
import torchvision
import argparse
from torch.backends import cudnn
import numpy as np
import torch.nn.functional as F
parser = argparse.ArgumentParser(description='test')
parser.add_argument('--dir', type=str, default='')
args = par... | 4,941 | 35.880597 | 146 | py |
linbp-attack | linbp-attack-master/attack/imagenet/utils.py | import torch
import torch.nn.functional as F
import torch.nn as nn
import torchvision
import numpy as np
from torch.utils.data import Dataset
import csv
import PIL.Image as Image
import os
import torchvision.transforms as T
import pickle
# Selected imagenet. The .csv file format:
# class_index, class, image_name
# 0... | 7,806 | 36 | 142 | py |
linbp-attack | linbp-attack-master/attack/imagenet/attack_resnet50.py | import os, sys
import torch
import torchvision.transforms as T
import torch.nn as nn
import argparse
import torch.nn.functional as F
import torchvision
import models as MODEL
from torch.backends import cudnn
import numpy as np
from utils import SelectedImagenet, Normalize, input_diversity, \
linbp_forw_resnet50, li... | 6,574 | 40.878981 | 132 | py |
linbp-attack | linbp-attack-master/attack/imagenet/models/pnasnet.py | from __future__ import print_function, division, absolute_import
from collections import OrderedDict
import torch
import torch.nn as nn
pretrained_settings = {
'pnasnet5large': {
'imagenet': {
'url': '-',
'input_space': 'RGB',
'input_size': [3, 331, 331],
'... | 17,685 | 43.774684 | 88 | py |
linbp-attack | linbp-attack-master/attack/imagenet/models/resnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models... | 9,898 | 36.496212 | 97 | py |
linbp-attack | linbp-attack-master/attack/imagenet/models/senet.py | from __future__ import print_function, division, absolute_import
from collections import OrderedDict
import math
import torch
import torch.nn as nn
__all__ = ['SENet', 'senet154']
pretrained_settings = {
'senet154': {
'imagenet': {
'url': '-',
'input_space': 'RGB',
'inp... | 13,630 | 34.590078 | 83 | py |
linbp-attack | linbp-attack-master/attack/imagenet/models/__init__.py | from .inceptionv3 import *
from .pnasnet import *
from .senet import *
from .resnet import * | 92 | 22.25 | 26 | py |
linbp-attack | linbp-attack-master/attack/imagenet/models/inceptionv3.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Inception3(nn.Module):
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
super(Inception3, self).__init__()
self.aux_logits = aux_logits
self.transform_input = transform_input
se... | 11,536 | 36.33657 | 88 | py |
linbp-attack | linbp-attack-master/attack/cifar10/test.py | import os, sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data as data
import torchvision.transforms as transforms
import models
# import numpy as np
import torchvision.datasets as DATASETS
import argparse
import numpy as np
parser = argparse.Ar... | 4,881 | 35.706767 | 153 | py |
linbp-attack | linbp-attack-master/attack/cifar10/utils.py | import os
import torch
import torchvision.transforms as T
from torch.utils.data import Dataset
import torch.nn as nn
import argparse
import models
import torch.nn.functional as F
from torch.backends import cudnn
import pickle
import numpy as np
import csv
import PIL.Image as Image
# Selected cifar-10. The .csv file fo... | 3,961 | 32.016667 | 99 | py |
linbp-attack | linbp-attack-master/attack/cifar10/attack_vgg19.py | import os
import torch
import torchvision.transforms as T
import torch.nn as nn
import argparse
import models
from torch.backends import cudnn
import numpy as np
from utils import Normalize, input_diversity, vgg19_forw, vgg19_ila_forw, ILAProjLoss, SelectedCifar10
parser = argparse.ArgumentParser()
parser.add_argument... | 5,763 | 39.307692 | 150 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/vgg.py |
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
import torch
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
class VGG(nn.Module):
def __init__(self, features, num_classes=1000):
super(VGG, self).__init__()
... | 3,731 | 27.707692 | 113 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/pyramidnet.py | import torch
import torch.nn as nn
import math
__all__ = ['pyramidnet272']
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def calc_prob(curr_layer, total_layers, p... | 5,819 | 34.487805 | 115 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/densenet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = ['densenet']
from torch.autograd import Variable
class Bottleneck(nn.Module):
def __init__(self, inplanes, expansion=4, growthRate=12, dropRate=0):
super(Bottleneck, self).__init__()
planes = expansion * gr... | 4,724 | 30.711409 | 99 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/resnext.py | from __future__ import division
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
__all__ = ['resnext']
class ResNeXtBottleneck(nn.Module):
def __init__(self, in_channels, out_channels, stride, cardinality, widen_factor):
""" Constructor
Args:
in_channel... | 5,072 | 43.113043 | 144 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/__init__.py | from __future__ import absolute_import
from .vgg import *
from .resnext import *
from .wrn import *
from .densenet import *
from .pyramidnet import *
from .gdas import * | 172 | 16.3 | 38 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/wrn.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['wrn']
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplac... | 3,896 | 40.457447 | 116 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/__init__.py | import os
import os.path as osp
import torch
from models.gdas.lib.scheduler import load_config
from models.gdas.lib.scheduler import load_config
from models.gdas.lib.nas import model_types
from models.gdas.lib.nas import NetworkCIFAR as Network
__all__ = ['gdas']
def gdas(checkpoint_fname):
checkpoint = torch.l... | 697 | 29.347826 | 93 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/__init__.py | | 0 | 0 | 0 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/scheduler/utils.py |
import os, sys, json
from pathlib import Path
from collections import namedtuple
support_types = ('str', 'int', 'bool', 'float')
def convert_param(original_lists):
assert isinstance(original_lists, list), 'The type is not right : {:}'.format(original_lists)
ctype, value = original_lists[0], original_lists[1]
a... | 1,192 | 28.097561 | 95 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/scheduler/scheduler.py |
import torch
from bisect import bisect_right
class MultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(self, optimizer, milestones, gammas, last_epoch=-1):
if not list(milestones) == sorted(milestones):
raise ValueError('Milestones should be a list of'
' increasing in... | 1,124 | 35.290323 | 90 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/scheduler/__init__.py |
from .utils import load_config
from .scheduler import MultiStepLR, obtain_scheduler
| 85 | 20.5 | 52 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas/ImageNet.py | import torch
import torch.nn as nn
from .construct_utils import Cell, Transition
class AuxiliaryHeadImageNet(nn.Module):
def __init__(self, C, num_classes):
"""assuming input size 14x14"""
super(AuxiliaryHeadImageNet, self).__init__()
self.features = nn.Sequential(
nn.ReLU(inplace=True),
nn.... | 3,272 | 30.171429 | 85 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas/CifarNet.py | import torch
import torch.nn as nn
from .construct_utils import Cell, Transition
class AuxiliaryHeadCIFAR(nn.Module):
def __init__(self, C, num_classes):
"""assuming input size 8x8"""
super(AuxiliaryHeadCIFAR, self).__init__()
self.features = nn.Sequential(
nn.ReLU(inplace=True),
nn.AvgPool2... | 2,755 | 29.622222 | 89 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas/model_search.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from .head_utils import CifarHEAD, ImageNetHEAD
from .operations import OPS, FactorizedReduce, ReLUConvBN
from .genotypes import PRIMITIVES, Genotype
class MixedOp(nn.Module):
def __init__(self, C, stride):
... | 5,177 | 30.005988 | 128 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas/head_utils.py | import torch
import torch.nn as nn
class ImageNetHEAD(nn.Sequential):
def __init__(self, C, stride=2):
super(ImageNetHEAD, self).__init__()
self.add_module('conv1', nn.Conv2d(3, C // 2, kernel_size=3, stride=2, padding=1, bias=False))
self.add_module('bn1' , nn.BatchNorm2d(C // 2))
self.add_module(... | 729 | 35.5 | 103 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas/construct_utils.py | import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from .operations import OPS, FactorizedReduce, ReLUConvBN, Identity
def random_select(length, ratio):
clist = []
index = random.randint(0, length-1)
for i in range(length):
if i == index or random.random() < ratio:
clist.... | 5,003 | 31.705882 | 99 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas/__init__.py |
from .model_search import Network
from .CifarNet import NetworkCIFAR
from .ImageNet import NetworkImageNet
# genotypes
from .genotypes import model_types
from .construct_utils import return_alphas_str
| 227 | 21.8 | 46 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas/SE_Module.py | import torch
import torch.nn as nn
# Squeeze and Excitation module
class SqEx(nn.Module):
def __init__(self, n_features, reduction=16):
super(SqEx, self).__init__()
if n_features % reduction != 0:
raise ValueError('n_features must be divisible by reduction (default = 16)')
self.linear1 = nn.Line... | 762 | 26.25 | 82 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas/genotypes.py | from collections import namedtuple
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
PRIMITIVES = [
'none',
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
NASNet = Genotype(
normal = [
... | 7,085 | 31.063348 | 346 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas/operations.py | import torch
import torch.nn as nn
OPS = {
'none' : lambda C, stride, affine: Zero(stride),
'avg_pool_3x3' : lambda C, stride, affine: nn.Sequential(
nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
... | 4,318 | 34.113821 | 129 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/test_dataset.py | import os, sys, torch
import torchvision.transforms as transforms
from .TieredImageNet import TieredImageNet
from .MetaBatchSampler import MetaBatchSampler
root_dir = os.environ['TORCH_HOME'] + '/tiered-imagenet'
print ('root : {:}'.format(root_dir))
means, stds = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
lists =... | 1,324 | 37.970588 | 149 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/test_NLP.py | import os, sys, torch
from .LanguageDataset import SentCorpus, BatchSentLoader
if __name__ == '__main__':
path = '../../data/data/penn'
corpus = SentCorpus( path )
loader = BatchSentLoader(corpus.test, 10)
for i, d in enumerate(loader):
print('{:} :: {:}'.format(i, d.size()))
| 291 | 25.545455 | 56 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/get_dataset_with_transform.py |
import os, sys, torch
import os.path as osp
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from ..utils import Cutout
from .TieredImageNet import TieredImageNet
Dataset2Class = {'cifar10' : 10,
'cifar100': 100,
'... | 3,189 | 40.973684 | 141 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/TieredImageNet.py | from __future__ import print_function
import numpy as np
from PIL import Image
import pickle as pkl
import os, cv2, csv, glob
import torch
import torch.utils.data as data
class TieredImageNet(data.Dataset):
def __init__(self, root_dir, split, transform=None):
self.split = split
self.root_dir = root_dir
... | 3,090 | 35.364706 | 193 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/LanguageDataset.py | import os
import torch
from collections import Counter
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
self.counter = Counter()
self.total = 0
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word... | 3,362 | 26.341463 | 78 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/__init__.py | from .MetaBatchSampler import MetaBatchSampler
from .TieredImageNet import TieredImageNet
from .LanguageDataset import Corpus
from .get_dataset_with_transform import get_datasets
| 179 | 35 | 52 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/datasets/MetaBatchSampler.py | # coding=utf-8
import numpy as np
import torch
class MetaBatchSampler(object):
def __init__(self, labels, classes_per_it, num_samples, iterations):
'''
Initialize MetaBatchSampler
Args:
- labels: an iterable containing all the labels for the current dataset
samples indexes will be infered from ... | 2,497 | 36.848485 | 102 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas_rnn/utils.py | import torch
import torch.nn as nn
import os, shutil
import numpy as np
def repackage_hidden(h):
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def batchify(data, bsz, use_cuda):
nbatch = data.size(0) // bsz
data = data.narrow(0, 0, nbatch * bsz)... | 1,812 | 26.059701 | 133 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas_rnn/basemodel.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .genotypes import STEPS
from .utils import mask2d, LockedDropout, embedded_dropout
INITRANGE = 0.04
def none_func(x):
return x * 0
class DARTSCell(nn.Module):
def __init__(self, ninp, nhid, dropouth, dropoutx, genotype):
s... | 5,547 | 29.483516 | 102 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas_rnn/model_search.py | import copy, torch
import torch.nn as nn
import torch.nn.functional as F
from collections import namedtuple
from .genotypes import PRIMITIVES, STEPS, CONCAT, Genotype
from .basemodel import DARTSCell, RNNModel
class DARTSCellSearch(DARTSCell):
def __init__(self, ninp, nhid, dropouth, dropoutx):
super(DARTSCell... | 3,544 | 32.761905 | 124 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas_rnn/__init__.py | # utils
from .utils import batchify, get_batch, repackage_hidden
# models
from .model_search import RNNModelSearch
from .model_search import DARTSCellSearch
from .basemodel import DARTSCell, RNNModel
# architecture
from .genotypes import DARTS_V1, DARTS_V2
from .genotypes import GDAS
| 285 | 27.6 | 56 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/nas_rnn/genotypes.py | from collections import namedtuple
Genotype = namedtuple('Genotype', 'recurrent concat')
PRIMITIVES = [
'none',
'tanh',
'relu',
'sigmoid',
'identity'
]
STEPS = 8
CONCAT = 8
ENAS = Genotype(
recurrent = [
('tanh', 0),
('tanh', 1),
('relu', 1),
('tanh', 3),
... | 1,057 | 17.892857 | 129 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/utils/save_meta.py | import torch
import os, sys
import os.path as osp
import numpy as np
def tensor2np(x):
if isinstance(x, np.ndarray): return x
if x.is_cuda: x = x.cpu()
return x.numpy()
class Save_Meta():
def __init__(self):
self.reset()
def __repr__(self):
return ('{name}'.format(name=self.__class__.__name__)+'(n... | 1,649 | 31.352941 | 168 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/utils/model_utils.py | import torch
import torch.nn as nn
import numpy as np
def count_parameters_in_MB(model):
if isinstance(model, nn.Module):
return np.sum(np.prod(v.size()) for v in model.parameters())/1e6
else:
return np.sum(np.prod(v.size()) for v in model)/1e6
class Cutout(object):
def __init__(self, length):
sel... | 923 | 24.666667 | 92 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/utils/flop_benchmark.py | import copy, torch
def print_FLOPs(model, shape, logs):
print_log, log = logs
model = copy.deepcopy( model )
model = add_flops_counting_methods(model)
model = model.cuda()
model.eval()
cache_inputs = torch.zeros(*shape).cuda()
#print_log('In the calculating function : cache input size : {:}'.format(cac... | 4,077 | 35.088496 | 103 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/utils/draw_pts.py | import os, sys, time
import numpy as np
import matplotlib
import random
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def draw_points(points, labels, save_path):
title = 'the visualized features'
dpi = 100
width, height = 1000, 1000
legend_fontsize = 10
figsize = width / f... | 1,132 | 25.97619 | 71 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/utils/utils.py | import os, sys, time
import numpy as np
import random
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
... | 4,845 | 34.115942 | 127 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/utils/evaluation_utils.py | import torch
def obtain_accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in top... | 453 | 25.705882 | 65 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/utils/gpu_manager.py | import os
class GPUManager():
queries = ('index', 'gpu_name', 'memory.free', 'memory.used', 'memory.total', 'power.draw', 'power.limit')
def __init__(self):
all_gpus = self.query_gpu(False)
def get_info(self, ctype):
cmd = 'nvidia-smi --query-gpu={} --format=csv,noheader'.format(ctype)
lines = os.p... | 2,418 | 33.070423 | 108 | py |
linbp-attack | linbp-attack-master/attack/cifar10/models/gdas/lib/utils/__init__.py |
from .utils import AverageMeter, RecorderMeter, convert_secs2time
from .utils import time_file_str, time_string
from .utils import test_imagenet_data
from .utils import print_log
from .evaluation_utils import obtain_accuracy
#from .draw_pts import draw_points
from .gpu_manager import GPUManager
from .save_meta import... | 452 | 29.2 | 65 | py |
s2anet | s2anet-master/setup.py | import os
import platform
import subprocess
import time
from setuptools import Extension, dist, find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
dist.Distribution().fetch_build_eggs(['Cython', 'numpy>=1.11.1'])
import numpy as np # noqa: E402, isort:skip
from Cython.Build impo... | 8,115 | 32.958159 | 111 | py |
s2anet | s2anet-master/tools/test.py | import argparse
import os
import os.path as osp
import shutil
import tempfile
import mmcv
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, load_checkpoint
from mmdet.apis import init_dist
from mmdet.core import coc... | 8,637 | 35.447257 | 79 | py |
s2anet | s2anet-master/tools/voc_eval.py | from argparse import ArgumentParser
import mmcv
import numpy as np
from mmdet import datasets
from mmdet.core import eval_map
def voc_eval(result_file, dataset, iou_thr=0.5):
det_results = mmcv.load(result_file)
gt_bboxes = []
gt_labels = []
gt_ignore = []
for i in range(len(dataset)):
a... | 1,819 | 27.888889 | 69 | py |
s2anet | s2anet-master/tools/convert_model.py | import argparse
import subprocess
from collections import OrderedDict
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output c... | 1,357 | 29.863636 | 77 | py |
s2anet | s2anet-master/tools/get_flops.py | import argparse
from mmcv import Config
from mmdet.models import build_detector
from mmdet.utils import get_model_complexity_info
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'... | 1,401 | 25.45283 | 73 | py |
s2anet | s2anet-master/tools/publish_model.py | import argparse
import subprocess
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = par... | 1,028 | 27.583333 | 77 | py |
s2anet | s2anet-master/tools/analyze_logs.py | import argparse
import json
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def cal_train_time(log_dicts, args):
for i, log_dict in enumerate(log_dicts):
print('{}Analyze train time of {}{}'.format('-' * 5, args.json_logs[i],
... | 6,272 | 34.044693 | 79 | py |
s2anet | s2anet-master/tools/upgrade_model_version.py | import argparse
import re
from collections import OrderedDict
import torch
def convert(in_file, out_file):
"""Convert keys in checkpoints.
There can be some breaking changes during the development of mmdetection,
and this tool is used for upgrading checkpoints trained with old versions
to the latest... | 1,322 | 29.767442 | 77 | py |
s2anet | s2anet-master/tools/test_robustness.py | import argparse
import copy
import os
import os.path as osp
import shutil
import tempfile
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, load_checkpoint
from pycocotools.coco import ... | 17,478 | 35.953488 | 79 | py |
s2anet | s2anet-master/tools/coco_error_analysis.py | import copy
import os
from argparse import ArgumentParser
from multiprocessing import Pool
import matplotlib.pyplot as plt
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
def makeplot(rs, ps, outDir, class_name, iou_type):
cs = np.vstack([
np.ones((2, 3)),
... | 6,784 | 37.771429 | 78 | py |
s2anet | s2anet-master/tools/robustness_eval.py | import os.path as osp
from argparse import ArgumentParser
import mmcv
import numpy as np
def print_coco_results(results):
def _print(result, ap=1, iouThr=None, areaRng='all', maxDets=100):
iStr = ' {:<18} {} @[ IoU={:<9} | \
area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
titleStr = 'Average ... | 8,374 | 31.587549 | 79 | py |
s2anet | s2anet-master/tools/train.py | from __future__ import division
import argparse
import os
import os.path as osp
import warnings
import torch
from mmcv import Config
from mmdet import __version__
from mmdet.apis import (get_root_logger, init_dist, set_random_seed,
train_detector)
from mmdet.datasets import build_dataset
from... | 3,887 | 31.672269 | 83 | py |
s2anet | s2anet-master/tools/coco_eval.py | from argparse import ArgumentParser
from mmdet.core import coco_eval
def main():
parser = ArgumentParser(description='COCO Evaluation')
parser.add_argument('result', help='result file path')
parser.add_argument('--ann', help='annotation file path')
parser.add_argument(
'--types',
type... | 914 | 28.516129 | 79 | py |
s2anet | s2anet-master/tools/detectron2pytorch.py | import argparse
from collections import OrderedDict
import mmcv
import torch
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
# detectron replace bn with affine channel layer
state_dict[torch_name + '.bias'] = torch.from_numpy... | 3,830 | 42.044944 | 78 | py |
s2anet | s2anet-master/tools/convert_datasets/pascal_voc.py | import argparse
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
import numpy as np
from mmdet.core import voc_classes
label_ids = {name: i + 1 for i, name in enumerate(voc_classes())}
def parse_xml(args):
xml_path, img_path = args
tree = ET.parse(xml_path)
root = tree.getroot()
... | 4,612 | 31.485915 | 79 | py |
s2anet | s2anet-master/DOTA_devkit/dota_evaluation_task1.py | # --------------------------------------------------------
# dota_evaluation_task1
# Licensed under The MIT License [see LICENSE for details]
# Written by Jian Ding, based on code from Bharath Hariharan
# --------------------------------------------------------
"""
To use the code, users should to config detpath, ... | 11,008 | 35.333333 | 149 | py |
s2anet | s2anet-master/DOTA_devkit/SplitOnlyImage_multi_process.py | import copy
import os
from functools import partial
from multiprocessing import Pool
import cv2
import dota_utils as util
import numpy as np
def split_single_warp(name, split_base, rate, extent):
split_base.SplitSingle(name, rate, extent)
class splitbase():
def __init__(self,
srcpath,
... | 3,649 | 30.73913 | 96 | py |
s2anet | s2anet-master/DOTA_devkit/dota-v1.5_evaluation_task2.py | # --------------------------------------------------------
# dota_evaluation_task1
# Licensed under The MIT License [see LICENSE for details]
# Written by Jian Ding, based on code from Bharath Hariharan
# --------------------------------------------------------
"""
To use the code, users should to config detpath, ... | 8,788 | 35.774059 | 144 | py |
s2anet | s2anet-master/DOTA_devkit/dota-v1.5_evaluation_task1.py | # --------------------------------------------------------
# dota_evaluation_task1
# Licensed under The MIT License [see LICENSE for details]
# Written by Jian Ding, based on code from Bharath Hariharan
# --------------------------------------------------------
"""
To use the code, users should to config detpath, ... | 10,688 | 34.277228 | 124 | py |
s2anet | s2anet-master/DOTA_devkit/hrsc2016_evaluation.py | # --------------------------------------------------------
# dota_evaluation_task1
# Licensed under The MIT License [see LICENSE for details]
# Written by Jian Ding, based on code from Bharath Hariharan
# --------------------------------------------------------
"""
To use the code, users should to config detpath, ... | 10,349 | 33.5 | 150 | py |
s2anet | s2anet-master/DOTA_devkit/dota_evaluation_task2.py | # --------------------------------------------------------
# dota_evaluation_task1
# Licensed under The MIT License [see LICENSE for details]
# Written by Jian Ding, based on code from Bharath Hariharan
# --------------------------------------------------------
"""
To use the code, users should to config detpath, ... | 8,788 | 35.020492 | 159 | py |
s2anet | s2anet-master/DOTA_devkit/ResultMerge_multi_process.py | """
To use the code, users should to config detpath, annopath and imagesetfile
detpath is the path for 15 result files, for the format, you can refer to "http://captain.whu.edu.cn/DOTAweb/tasks.html"
search for PATH_TO_BE_CONFIGURED to config the paths
Note, the evaluation is on the large scale images
"... | 8,833 | 30.663082 | 124 | py |
s2anet | s2anet-master/DOTA_devkit/convert_dota_to_mmdet.py | import os
import os.path as osp
import mmcv
import numpy as np
from PIL import Image
from mmdet.core import poly_to_rotated_box_single
wordname_15 = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field',
'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
'basketball-court'... | 3,332 | 38.678571 | 95 | py |
s2anet | s2anet-master/DOTA_devkit/dota_utils.py | import codecs
import math
import os
import re
import sys
import numpy as np
import shapely.geometry as shgeo
"""
some basic functions which are useful for process DOTA data
"""
wordname_15 = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship',
'te... | 9,669 | 33.412811 | 117 | py |
s2anet | s2anet-master/DOTA_devkit/__init__.py | | 0 | 0 | 0 | py |
s2anet | s2anet-master/DOTA_devkit/prepare_dota1_ms.py | import os
import os.path as osp
from DOTA_devkit.ImgSplit_multi_process import splitbase as splitbase_trainval
from DOTA_devkit.SplitOnlyImage_multi_process import splitbase as splitbase_test
from DOTA_devkit.convert_dota_to_mmdet import convert_dota_to_mmdet
def mkdir_if_not_exists(path):
if not osp.exists(path... | 2,130 | 39.207547 | 112 | py |
s2anet | s2anet-master/DOTA_devkit/ImgSplit_multi_process.py | """
-------------
This is the multi-process version
"""
import codecs
import copy
import math
import os
from functools import partial
from multiprocessing import Pool
import cv2
import dota_utils as util
import numpy as np
import shapely.geometry as shgeo
from dota_utils import GetFileFromThisRootDir
def choose_best... | 12,045 | 38.237785 | 124 | py |
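The seven columns above (repo, file, code, file_length, avg_line_length, max_line_length, extension_type) form a flat per-file schema. As a minimal sketch of working with it — assuming the rows have been exported as JSON Lines, and noting that the path `files.jsonl` is hypothetical — the table can be summarized and filtered with pandas:

```python
# Minimal sketch, assuming a JSON Lines export of the table above with the
# same seven columns; the filename "files.jsonl" is a placeholder.
import pandas as pd

df = pd.read_json("files.jsonl", lines=True)

# Per-repo summary: file count and total code size in bytes (file_length).
summary = df.groupby("repo").agg(
    n_files=("file", "count"),
    total_bytes=("file_length", "sum"),
)
print(summary.sort_values("total_bytes", ascending=False))

# Example filter: flag files with unusually long lines, which often indicates
# minified or data-heavy sources rather than hand-written code.
long_lines = df[df["max_line_length"] > 200][["repo", "file", "max_line_length"]]
print(long_lines)
```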