| repo (string, 2–99 chars) | file (string, 13–225 chars) | code (string, 0–18.3M chars) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
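Each row below is one source file from the corpus; the `code` cell is a preview truncated by the viewer (`...` marks the cut), followed by the file's length in characters, average line length, maximum line length, and extension. As a minimal sketch of how such a table could be inspected programmatically, assuming it is backed by a Hugging Face dataset (the identifier below is a hypothetical placeholder):

```python
from datasets import load_dataset

# "user/python-code-corpus" is a hypothetical placeholder identifier.
ds = load_dataset("user/python-code-corpus", split="train")

for row in ds.select(range(3)):
    print(row["repo"], row["file"], row["file_length"], row["max_line_length"])
```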
FishFSRNet | FishFSRNet-main/parsing/test_parsingnet.py | from option import args
import os
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_name
import torch
import dataset_parsingnet
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import os
import util
import torchvision
import parsingnet
net = parsingnet.ParsingNet()
net = util.prepa... | 1,315 | 36.6 | 108 | py |
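The test script above sets `CUDA_VISIBLE_DEVICES` from the `--cuda_name` argument before `import torch`, a pattern repeated across these scripts. The ordering matters: the mask only takes effect if the variable is set before CUDA is initialized. A minimal sketch:

```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # e.g. args.cuda_name in the script above

import torch  # imported only after the mask is in place

if torch.cuda.is_available():
    print(torch.cuda.device_count())  # counts only the masked-in devices
```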
FishFSRNet | FishFSRNet-main/parsing/parsingnet.py | import common
import torch.nn as nn
class ParsingNet(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(ParsingNet, self).__init__()
n_resblocks = 8
n_feats = 64
kernel_size = 3
act = nn.ReLU(True)
self.args = args
m_head = [conv(args.n_co... | 906 | 24.914286 | 77 | py |
FishFSRNet | FishFSRNet-main/parsing/option.py | import argparse
# import os
parser = argparse.ArgumentParser(description='FaceSR')
parser.add_argument('--cpu', action='store_true',
help='use cpu only')
parser.add_argument('--n_GPUs', type=int, default=1,
help='number of GPUs')
parser.add_argument('--cuda_name', type=str, def... | 3,257 | 36.883721 | 76 | py |
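`option.py` centralizes configuration as `argparse` flags; the preview cuts off the `--cuda_name` default. A self-contained sketch of the visible arguments, with the `--cuda_name` default of `'0'` as an assumption:

```python
import argparse

parser = argparse.ArgumentParser(description='FaceSR')
parser.add_argument('--cpu', action='store_true', help='use cpu only')
parser.add_argument('--n_GPUs', type=int, default=1, help='number of GPUs')
parser.add_argument('--cuda_name', type=str, default='0',  # default assumed
                    help='value for CUDA_VISIBLE_DEVICES')
args = parser.parse_args()
```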
FishFSRNet | FishFSRNet-main/parsing/cbam.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class ChannelGate(nn.Module):
def __init__(self, gate_channels, reduction_ratio=16, pool_types=None):
super(ChannelGate, self).__init__()
... | 3,309 | 34.978261 | 119 | py |
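The `Flatten` module at the top of `cbam.py` is shown in full; a quick shape check makes its behavior concrete:

```python
import torch
import torch.nn as nn

class Flatten(nn.Module):
    def forward(self, x):
        # Collapse every dimension after the batch axis: (B, C, H, W) -> (B, C*H*W)
        return x.view(x.size(0), -1)

x = torch.randn(8, 64, 4, 4)
assert Flatten()(x).shape == (8, 64 * 4 * 4)
```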
FishFSRNet | FishFSRNet-main/parsing/common.py | import torch.nn as nn
import torch
import cbam
import math
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
class BasicBlock(nn.Sequential):
def __init__(
self, conv, in_channels, out_channels, kernel... | 8,996 | 34.007782 | 127 | py |
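`batched_index_select` in `common.py` is also complete in the preview: for each batch element it gathers selected rows from a `(B, N, D)` tensor. A worked example of the shapes:

```python
import torch

def batched_index_select(values, indices):
    # values: (B, N, D); indices: (B, K) -> output: (B, K, D)
    last_dim = values.shape[-1]
    return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))

values = torch.arange(24.0).reshape(2, 3, 4)   # B=2, N=3, D=4
indices = torch.tensor([[0, 2], [1, 1]])       # two row indices per batch element
out = batched_index_select(values, indices)
assert out.shape == (2, 2, 4)
assert torch.equal(out[0, 1], values[0, 2])    # row 2 of batch element 0
```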
FishFSRNet | FishFSRNet-main/parsing/util.py |
import torch
import numpy as np
import math
import cv2
def prepare(arg):
if torch.cuda.is_available():
# print(1)
arg = arg.cuda()
return arg
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [... | 4,380 | 28.601351 | 92 | py |
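The preview cuts `rgb2ycbcr` off mid-docstring, but the docstring pins it to MATLAB's conversion with uint8 input in [0, 255] or float in [0, 1]. For reference, the BT.601 luma that MATLAB uses for float input looks like this — a sketch of the standard formula, not the repository's exact code:

```python
import numpy as np

def rgb2y(img):
    # img: HxWx3 float RGB in [0, 1]; returns Y in [16/255, 235/255].
    y = 65.481 * img[..., 0] + 128.553 * img[..., 1] + 24.966 * img[..., 2] + 16.0
    return y / 255.0
```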
FishFSRNet | FishFSRNet-main/parsing/main_parsingnet.py | import torch
import torch.optim as optim
from option import args
import os
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_name
import torch.nn as nn
import dataset_parsingnet
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import util
import torchvision
from parsingnet import P... | 2,264 | 40.181818 | 109 | py |
FishFSRNet | FishFSRNet-main/parsing/dataset_parsingnet.py | from torch.utils import data
import os
from PIL import Image
from torchvision import transforms
from torchvision.transforms import ToTensor
import numpy
import glob
class Data(data.Dataset):
def __init__(self, root, args, train=False):
        # Returns the list of files and folders under the given path.
self.args = args
if args.scale == ... | 1,638 | 31.78 | 88 | py |
FishFSRNet | FishFSRNet-main/fsr/fishfsrnet.py | import common
import torch.nn.functional as F
import torch.nn as nn
import torch
def fish_block(args, conv=common.default_conv, n_feats=64, PCSR1=False):
kernel_size = 3
res = []
act = nn.ReLU(True)
if PCSR1:
res.append(common.PCSR1(
conv, n_feats, kernel_size, act=act, res_scale... | 7,540 | 32.665179 | 107 | py |
FishFSRNet | FishFSRNet-main/fsr/test.py | from option import args
import os
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_name
import torch
import dataset_parsing
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import os
import util
import torchvision
from fishfsrnet import FISHNET
net = FISHNET(args)
net = util.prepa... | 1,520 | 39.026316 | 105 | py |
FishFSRNet | FishFSRNet-main/fsr/option.py | import argparse
# import os
parser = argparse.ArgumentParser(description='FaceSR')
parser.add_argument('--cpu', action='store_true',
help='use cpu only')
parser.add_argument('--n_GPUs', type=int, default=1,
help='number of GPUs')
parser.add_argument('--cuda_name', type=str, def... | 3,257 | 36.883721 | 76 | py |
FishFSRNet | FishFSRNet-main/fsr/cbam.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class ChannelGate(nn.Module):
def __init__(self, gate_channels, reduction_ratio=16, pool_types=None):
super(ChannelGate, self).__init__()
... | 3,309 | 34.978261 | 119 | py |
FishFSRNet | FishFSRNet-main/fsr/common.py | import torch.nn as nn
import torch
import cbam
import math
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
class BasicBlock(nn.Sequential):
def __init__(
self, conv, in_channels, out_channels, kernel... | 8,996 | 34.007782 | 127 | py |
FishFSRNet | FishFSRNet-main/fsr/util.py |
import torch
import numpy as np
import math
import cv2
def prepare(arg):
if torch.cuda.is_available():
# print(1)
arg = arg.cuda()
return arg
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [... | 4,380 | 28.601351 | 92 | py |
FishFSRNet | FishFSRNet-main/fsr/dataset_parsing.py | from torch.utils import data
import os
from PIL import Image
from torchvision.transforms import ToTensor
import numpy
import glob
import random
import numpy as np
def augment(lr, hr, p, hflip=True, rot=True):
# def _augment(img):
# if hflip: img = img[:, ::-1, :]
# if vflip: img = img[::-1, :, :]
... | 3,235 | 31.36 | 88 | py |
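The commented-out `_augment` body above documents the intended flips (`img[:, ::-1, :]` horizontal, `img[::-1, :, :]` vertical). The essential detail for super-resolution data is that the LR and HR images must receive identical transforms; a minimal sketch of that pairing:

```python
import numpy as np

def augment_pair(lr, hr, hflip=True, vflip=True):
    # Apply the *same* random flips to an LR/HR pair of HxWxC arrays.
    if hflip and np.random.rand() < 0.5:
        lr, hr = lr[:, ::-1, :], hr[:, ::-1, :]
    if vflip and np.random.rand() < 0.5:
        lr, hr = lr[::-1, :, :], hr[::-1, :, :]
    return np.ascontiguousarray(lr), np.ascontiguousarray(hr)
```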
FishFSRNet | FishFSRNet-main/fsr/main_parsing.py | from option import args
import os
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_name
import torch
import torch.optim as optim
import torch.nn as nn
import dataset_parsing
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import util
from fishfsrnet import FISHNET
net = FISHNE... | 2,671 | 40.75 | 106 | py |
omni3d | omni3d-main/tools/__init__.py | 0 | 0 | 0 | py | |
omni3d | omni3d-main/tools/train_net.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import logging
import os
import sys
import numpy as np
import copy
from collections import OrderedDict
import torch
from torch.nn.parallel import DistributedDataParallel
import torch.distributed as dist
import detectron2.utils.comm as comm
from detectron2.checkpoint i... | 18,388 | 35.056863 | 138 | py |
omni3d | omni3d-main/demo/demo.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import logging
import os
import argparse
import sys
import numpy as np
from collections import OrderedDict
import torch
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import default_argument_parser... | 7,175 | 34.349754 | 158 | py |
omni3d | omni3d-main/cubercnn/solver/checkpoint.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from detectron2.checkpoint import PeriodicCheckpointer
from typing import Any
class PeriodicCheckpointerOnlyOne(PeriodicCheckpointer):
def step(self, iteration: int, **kwargs: Any) -> None:
"""
Perform the appropriate action at the given iteration... | 1,060 | 36.892857 | 87 | py |
omni3d | omni3d-main/cubercnn/solver/__init__.py | from .build import *
from .checkpoint import * | 46 | 22.5 | 25 | py |
omni3d | omni3d-main/cubercnn/solver/build.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import torch
from typing import Any, Dict, List, Set
from detectron2.solver.build import maybe_add_gradient_clipping
def build_optimizer(cfg, model):
norm_module_types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
... | 2,963 | 37.493506 | 100 | py |
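`build_optimizer` above enumerates normalization module types; in detectron2-style optimizer builders this list typically lets normalization parameters receive their own weight-decay setting. A generic sketch of that split (not necessarily Omni3D's exact policy):

```python
import torch

def split_decay_groups(model, norm_types=(torch.nn.BatchNorm2d, torch.nn.LayerNorm)):
    decay, no_decay = [], []
    for module in model.modules():
        for name, param in module.named_parameters(recurse=False):
            if not param.requires_grad:
                continue
            # Norm parameters and biases commonly skip weight decay.
            if isinstance(module, norm_types) or name == "bias":
                no_decay.append(param)
            else:
                decay.append(param)
    return [{"params": decay, "weight_decay": 1e-4},
            {"params": no_decay, "weight_decay": 0.0}]

optimizer = torch.optim.SGD(split_decay_groups(torch.nn.Linear(4, 2)), lr=0.01)
```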
omni3d | omni3d-main/cubercnn/evaluation/omni3d_evaluation.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import contextlib
import copy
import datetime
import io
import itertools
import json
import logging
import os
import time
from collections import defaultdict
from typing import List, Union
from typing import Tuple
import numpy as np
import pycocotools.mask as maskUti... | 65,081 | 37.171261 | 168 | py |
omni3d | omni3d-main/cubercnn/evaluation/__init__.py | from .omni3d_evaluation import * | 32 | 32 | 32 | py |
omni3d | omni3d-main/cubercnn/config/config.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from detectron2.config import CfgNode as CN
def get_cfg_defaults(cfg):
# A list of category names which will be used
cfg.DATASETS.CATEGORY_NAMES = []
# The category names which will be treated as ignore
# e.g., not counting as background during trai... | 6,155 | 37.716981 | 82 | py |
omni3d | omni3d-main/cubercnn/config/__init__.py | from .config import * | 21 | 21 | 21 | py |
omni3d | omni3d-main/cubercnn/vis/vis.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import math
import torch
from copy import deepcopy
from pytorch3d.structures.meshes import join_meshes_as_scene
from pytorch3d.transforms.so3 import (
so3_relative_angle,
)
from matplotlib.pat... | 29,091 | 38.154778 | 206 | py |
omni3d | omni3d-main/cubercnn/vis/logperf.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from termcolor import colored
import itertools
from tabulate import tabulate
import logging
logger = logging.getLogger(__name__)
def print_ap_category_histogram(dataset, results):
"""
Prints AP performance for each category.
Args:
results: dictio... | 3,654 | 29.974576 | 195 | py |
omni3d | omni3d-main/cubercnn/vis/__init__.py | from .vis import * | 19 | 19 | 19 | py |
omni3d | omni3d-main/cubercnn/util/model_zoo.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from detectron2.utils.file_io import PathHandler, PathManager
__all__ = ["CubeRCNNHandler"]
class CubeRCNNHandler(PathHandler):
"""
Resolves CubeRCNN's model zoo files.
"""
PREFIX = "cubercnn://"
CUBERCNN_PREFIX = "https://dl.fbaipublicfiles.co... | 724 | 28 | 75 | py |
omni3d | omni3d-main/cubercnn/util/math_util.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import math
import numpy as np
import pandas as pd
from typing import Tuple, List
from copy import copy
from pytorch3d.renderer.lighting import PointLights
from pytorch3d.renderer.mesh.renderer import MeshRenderer
from pytorch3d.renderer.mesh.shader import SoftPhongSh... | 31,079 | 30.779141 | 167 | py |
omni3d | omni3d-main/cubercnn/util/util.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import json
import pickle
import cv2
from time import time
import numpy as np
import os
import shutil
import scipy.io
from PIL import Image
from glob import glob
from difflib import SequenceMatcher
import matplotlib.colors as mplc
def file_parts(file_path):
bas... | 14,651 | 47.356436 | 96 | py |
omni3d | omni3d-main/cubercnn/util/__init__.py | from .util import *
from .model_zoo import *
from .math_util import * | 69 | 22.333333 | 24 | py |
omni3d | omni3d-main/cubercnn/data/builtin.py | # Copyright (c) Meta Platforms, Inc. and affiliates
def get_omni3d_categories(dataset="omni3d"):
"""
Returns the Omni3D categories for dataset
Args:
dataset: str
Returns:
cats: set of strings with category names
"""
if dataset == "omni3d":
cats = set({'chair', 'table', ... | 3,848 | 82.673913 | 534 | py |
omni3d | omni3d-main/cubercnn/data/dataset_mapper.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import copy
import torch
import numpy as np
from detectron2.structures import BoxMode, Keypoints
from detectron2.data import detection_utils
from detectron2.data import transforms as T
from detectron2.data import (
DatasetMapper
)
from detectron2.structures import... | 5,231 | 32.538462 | 128 | py |
omni3d | omni3d-main/cubercnn/data/datasets.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import json
import time
import os
import contextlib
import io
import logging
import numpy as np
from pycocotools.coco import COCO
from collections import defaultdict
from fvcore.common.timer import Timer
from detectron2.utils.file_io import PathManager
from detectron2... | 16,471 | 35.685969 | 141 | py |
omni3d | omni3d-main/cubercnn/data/__init__.py | from .datasets import *
from .dataset_mapper import *
from .build import *
from .builtin import * | 97 | 23.5 | 29 | py |
omni3d | omni3d-main/cubercnn/data/build.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import itertools
import logging
import numpy as np
import math
from collections import defaultdict
import torch.utils.data
from detectron2.config import configurable
from detectron2.utils.logger import _log_api_usage
from detectron2.data.catalog import DatasetCatalo... | 9,407 | 39.551724 | 128 | py |
omni3d | omni3d-main/cubercnn/modeling/backbone/dla.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import os
import math
import numpy as np
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import detectron2.utils.comm as comm
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone impor... | 18,904 | 36.287968 | 98 | py |
omni3d | omni3d-main/cubercnn/modeling/backbone/resnet.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from torchvision import models
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.fpn import LastLevelMaxPool
from detectron2.modeling.backbone.resnet import build_resnet_backbone
from detectron2... | 3,333 | 33.371134 | 113 | py |
omni3d | omni3d-main/cubercnn/modeling/backbone/mnasnet.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from torchvision import models
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
import torch.nn.functional as F
from detectron2.modeling.backbone.fpn import FPN
... | 1,936 | 29.265625 | 89 | py |
omni3d | omni3d-main/cubercnn/modeling/backbone/densenet.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from torchvision import models
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
import torch.nn.functional as F
from detectron2.modeling.backbone.fpn import FPN
... | 1,952 | 29.515625 | 95 | py |
omni3d | omni3d-main/cubercnn/modeling/backbone/shufflenet.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from torchvision import models
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
import torch.nn.functional as F
from detectron2.modeling.backbone.fpn import FPN
... | 2,113 | 29.2 | 91 | py |
omni3d | omni3d-main/cubercnn/modeling/backbone/__init__.py | from .densenet import *
from .mnasnet import *
from .resnet import *
from .shufflenet import *
from .dla import * | 118 | 22.8 | 26 | py |
omni3d | omni3d-main/cubercnn/modeling/meta_arch/__init__.py | from .rcnn3d import * | 21 | 21 | 21 | py |
omni3d | omni3d-main/cubercnn/modeling/meta_arch/rcnn3d.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from typing import Dict, List, Optional
import torch
import numpy as np
from detectron2.layers import ShapeSpec, batched_nms
from detectron2.utils.visualizer import Visualizer
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.structures ... | 11,688 | 41.974265 | 171 | py |
omni3d | omni3d-main/cubercnn/modeling/roi_heads/fast_rcnn.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from re import L
import torch
from torch.nn import functional as F
from typing import List, Tuple
from fvcore.nn import giou_loss, smooth_l1_loss
from detectron2.utils.events import get_event_storage
from detectron2.layers import cat, cross_entropy, nonzero_tuple, ba... | 11,154 | 41.576336 | 113 | py |
omni3d | omni3d-main/cubercnn/modeling/roi_heads/cube_head.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from detectron2.utils.registry import Registry
from typing import Dict
from detectron2.layers import ShapeSpec
from torch import nn
import torch
import numpy as np
import fvcore.nn.weight_init as weight_init
from pytorch3d.transforms.rotation_conversions import _copy... | 8,064 | 38.925743 | 96 | py |
omni3d | omni3d-main/cubercnn/modeling/roi_heads/roi_heads.py | # Copyright (c) Meta Platforms, Inc. and affiliates
import logging
import numpy as np
import cv2
from typing import Dict, List, Tuple
import torch
from torch import nn
import torch.nn.functional as F
from pytorch3d.transforms.so3 import (
so3_relative_angle
)
from detectron2.config import configurable
from detectro... | 41,015 | 42.634043 | 151 | py |
omni3d | omni3d-main/cubercnn/modeling/roi_heads/__init__.py | from .roi_heads import * | 24 | 24 | 24 | py |
omni3d | omni3d-main/cubercnn/modeling/proposal_generator/rpn.py | # Copyright (c) Meta Platforms, Inc. and affiliates
from typing import Dict, List, Tuple
import torch
from typing import List, Tuple, Union
import torch.nn.functional as F
from detectron2.config import configurable
from detectron2.utils.events import get_event_storage
from detectron2.layers import ShapeSpec, cat
from d... | 15,229 | 42.022599 | 141 | py |
omni3d | omni3d-main/cubercnn/modeling/proposal_generator/__init__.py | from .rpn import *
| 19 | 9 | 18 | py |
VLC-BERT | VLC-BERT-master/vqa/test.py | import _init_paths
import os
import argparse
from copy import deepcopy
from vqa.function.config import config, update_config
from vqa.function.test import test_net
def parse_args():
parser = argparse.ArgumentParser('Get Test Result of VQA Network')
parser.add_argument('--cfg', type=str, help='path to answer ... | 1,525 | 32.173913 | 108 | py |
VLC-BERT | VLC-BERT-master/vqa/_init_paths.py | import os
import sys
this_dir = os.path.abspath(os.path.dirname(__file__))
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
root_path = os.path.join(this_dir, '../')
add_path(root_path)
| 224 | 15.071429 | 53 | py |
VLC-BERT | VLC-BERT-master/vqa/train_end2end.py | import _init_paths
import os
import argparse
import torch
import subprocess
from vqa.function.config import config, update_config
from vqa.function.train import train_net
from vqa.function.test import test_net
def parse_args():
parser = argparse.ArgumentParser('Train Cognition Network')
parser.add_argument('... | 2,191 | 33.793651 | 113 | py |
VLC-BERT | VLC-BERT-master/vqa/function/val.py | from collections import namedtuple
import torch
from common.trainer import to_cuda
@torch.no_grad()
def do_validation(net, val_loader, metrics, label_index_in_batch):
net.eval()
metrics.reset()
for nbatch, batch in enumerate(val_loader):
batch = to_cuda(batch)
label = batch[label_index_in_... | 528 | 26.842105 | 95 | py |
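`do_validation` above captures a standard evaluation idiom: `@torch.no_grad()` plus `net.eval()` before iterating the loader. A generic, self-contained version of the pattern (not VLC-BERT's exact helper):

```python
import torch

@torch.no_grad()
def evaluate(net, loader, criterion):
    net.eval()                      # disable dropout / use running BN statistics
    total, n = 0.0, 0
    for inputs, labels in loader:
        total += criterion(net(inputs), labels).item()
        n += 1
    net.train()                     # restore training mode for the caller
    return total / max(n, 1)
```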
VLC-BERT | VLC-BERT-master/vqa/function/test.py | import os
import pprint
import shutil
import json
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn.functional as F
from common.utils.load import smart_load_model_state_dict
from common.trainer import to_cuda
from common.utils.create_logger import create_logger
from vqa.data.build import m... | 3,359 | 39.481928 | 120 | py |
VLC-BERT | VLC-BERT-master/vqa/function/config.py | from easydict import EasyDict as edict
import yaml
_C = edict()
config = _C
# ------------------------------------------------------------------------------------- #
# Common options
# ------------------------------------------------------------------------------------- #
_C.RNG_SEED = -1
_C.OUTPUT_PATH = ''
_C.MODUL... | 7,553 | 36.211823 | 108 | py |
VLC-BERT | VLC-BERT-master/vqa/function/__init__.py | 0 | 0 | 0 | py | |
VLC-BERT | VLC-BERT-master/vqa/function/train.py | import os
import pprint
import shutil
import inspect
from tensorboardX import SummaryWriter
import numpy as np
import torch
import torch.nn
import torch.optim as optim
import torch.distributed as distributed
from torch.nn.parallel import DistributedDataParallel as DDP
from common.utils.create_logger import create_log... | 17,541 | 51.053412 | 147 | py |
VLC-BERT | VLC-BERT-master/vqa/modules/resnet_vlbert_for_vqa.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from external.pytorch_pretrained_bert import BertTokenizer
from external.pytorch_pretrained_bert.modeling import BertPredictionHeadTransform
from common.module import Module
from common.fast_rcnn import FastRCNN
from common.visual_linguistic_b... | 16,341 | 47.064706 | 117 | py |
VLC-BERT | VLC-BERT-master/vqa/modules/__init__.py | from .resnet_vlbert_for_vqa import ResNetVLBERT
| 50 | 11.75 | 47 | py |
VLC-BERT | VLC-BERT-master/vqa/data/__init__.py | 0 | 0 | 0 | py | |
VLC-BERT | VLC-BERT-master/vqa/data/collate_batch.py | import torch
from common.utils.clip_pad import *
class BatchCollator(object):
def __init__(self, dataset, append_ind=False):
self.dataset = dataset
self.test_mode = self.dataset.test_mode
self.data_names = self.dataset.data_names
self.append_ind = append_ind
def __call__(self,... | 2,035 | 35.357143 | 115 | py |
VLC-BERT | VLC-BERT-master/vqa/data/build.py | import torch.utils.data
from .datasets import *
from . import samplers
from .transforms.build import build_transforms
from .collate_batch import BatchCollator
import pprint
DATASET_CATALOGS = {'vqa': VQA}
def build_dataset(dataset_name, *args, **kwargs):
assert dataset_name in DATASET_CATALOGS, "dataset not in ... | 4,336 | 42.37 | 106 | py |
VLC-BERT | VLC-BERT-master/vqa/data/datasets/vqa.py | import os
import json
import _pickle as cPickle
from PIL import Image
import re
import base64
import numpy as np
import csv
import sys
import time
import pprint
import logging
import torch
from torch.utils.data import Dataset
from external.pytorch_pretrained_bert import BertTokenizer
from common.utils.zipreader impor... | 21,774 | 45.527778 | 127 | py |
VLC-BERT | VLC-BERT-master/vqa/data/datasets/__init__.py | from .vqa import VQA
| 22 | 6.666667 | 20 | py |
VLC-BERT | VLC-BERT-master/vqa/data/samplers/grouped_batch_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import torch
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
class GroupedBatchSampler(BatchSampler):
"""
Wraps another sampler to yield a mini-batch of indices.
It enfo... | 4,846 | 40.42735 | 88 | py |
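A `BatchSampler` subclass such as `GroupedBatchSampler` plugs into `DataLoader` via the `batch_sampler` argument (mutually exclusive with `batch_size`, `shuffle`, and `sampler`). A usage sketch; the constructor arguments are assumed, since the preview truncates the signature:

```python
import torch
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from vqa.data.samplers import GroupedBatchSampler

dataset = TensorDataset(torch.randn(10, 3))
group_ids = [i % 2 for i in range(len(dataset))]   # e.g. aspect-ratio buckets

# Assumed signature: GroupedBatchSampler(sampler, group_ids, batch_size)
batch_sampler = GroupedBatchSampler(SequentialSampler(dataset), group_ids, batch_size=4)
loader = DataLoader(dataset, batch_sampler=batch_sampler)
```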
VLC-BERT | VLC-BERT-master/vqa/data/samplers/distributed.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed.
# FIXME remove this once c10d fixes the bug it has
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class DistributedSampler(S... | 2,568 | 37.924242 | 86 | py |
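Per its header, this `DistributedSampler` is a verbatim copy of the old `torch.utils.data.distributed` version, which shards the dataset across ranks. The usage detail worth remembering is `set_epoch`, which reseeds the shuffle so epochs differ; a sketch, assuming an initialized process group:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from vqa.data.samplers import DistributedSampler  # the class previewed above

# Requires torch.distributed to be initialized (e.g. via a launcher).
dataset = TensorDataset(torch.randn(100, 3))      # stand-in dataset
sampler = DistributedSampler(dataset)
loader = DataLoader(dataset, batch_size=32, sampler=sampler)

for epoch in range(3):
    sampler.set_epoch(epoch)   # reseed the shuffle so each epoch differs
    for (batch,) in loader:
        pass                   # training step would go here
```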
VLC-BERT | VLC-BERT-master/vqa/data/samplers/__init__.py | from .distributed import DistributedSampler
from .grouped_batch_sampler import GroupedBatchSampler
| 100 | 24.25 | 54 | py |
VLC-BERT | VLC-BERT-master/vqa/data/transforms/__init__.py | from .transforms import Compose
from .transforms import Resize
from .transforms import RandomHorizontalFlip
from .transforms import ToTensor
from .transforms import Normalize
from .build import build_transforms
| 212 | 25.625 | 44 | py |
VLC-BERT | VLC-BERT-master/vqa/data/transforms/build.py | from . import transforms as T
def build_transforms(cfg, mode='train'):
assert mode in ['train', 'test', 'val']
min_size = cfg.SCALES[0]
max_size = cfg.SCALES[1]
assert min_size <= max_size
if mode == 'train':
flip_prob = cfg.TRAIN.FLIP_PROB
elif mode == 'test':
flip_prob = cfg... | 1,034 | 23.069767 | 85 | py |
VLC-BERT | VLC-BERT-master/vqa/data/transforms/transforms.py | import random
import numpy as np
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, boxes, masks, im_info, flipped):
for t in self.transforms:
... | 4,104 | 30.821705 | 97 | py |
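Unlike torchvision's single-argument `Compose`, the `Compose` here threads five values (image, boxes, masks, image info, flip flag) through every transform so geometric changes stay consistent across the image and its annotations. The preview truncates the loop body; under that reading, a minimal sketch:

```python
class Compose(object):
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, boxes, masks, im_info, flipped):
        # Each transform consumes and returns the full five-tuple.
        for t in self.transforms:
            image, boxes, masks, im_info, flipped = t(image, boxes, masks, im_info, flipped)
        return image, boxes, masks, im_info, flipped
```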
VLC-BERT | VLC-BERT-master/aokvqa/test.py | import _init_paths
import os
import argparse
from copy import deepcopy
from aokvqa.function.config import config, update_config
from aokvqa.function.test import test_net
def parse_args():
parser = argparse.ArgumentParser('Get Test Result of OK-VQA Network')
parser.add_argument('--cfg', type=str, help='path t... | 1,533 | 32.347826 | 108 | py |
VLC-BERT | VLC-BERT-master/aokvqa/_init_paths.py | import os
import sys
this_dir = os.path.abspath(os.path.dirname(__file__))
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
root_path = os.path.join(this_dir, '../')
add_path(root_path)
| 224 | 15.071429 | 53 | py |
VLC-BERT | VLC-BERT-master/aokvqa/train_end2end.py | import _init_paths
import os
import argparse
import torch
import subprocess
from aokvqa.function.config import config, update_config
from aokvqa.function.train import train_net
from aokvqa.function.test import test_net
from external.PythonEvaluationTools.aokvqa_vqaEval import run_eval
def parse_args():
parser = ... | 2,328 | 34.830769 | 113 | py |
VLC-BERT | VLC-BERT-master/aokvqa/function/val.py | from collections import namedtuple
import torch
from common.trainer import to_cuda
@torch.no_grad()
def do_validation(net, val_loader, metrics, label_index_in_batch):
net.eval()
metrics.reset()
for nbatch, batch in enumerate(val_loader):
batch = to_cuda(batch)
label = batch[label_index_in_... | 528 | 26.842105 | 95 | py |
VLC-BERT | VLC-BERT-master/aokvqa/function/test.py | import os
import pprint
import shutil
import json
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn.functional as F
from common.utils.load import smart_load_model_state_dict
from common.trainer import to_cuda
from common.utils.create_logger import create_logger
from aokvqa.data.build impor... | 3,526 | 40.494118 | 162 | py |
VLC-BERT | VLC-BERT-master/aokvqa/function/config.py | from easydict import EasyDict as edict
import yaml
_C = edict()
config = _C
# ------------------------------------------------------------------------------------- #
# Common options
# ------------------------------------------------------------------------------------- #
_C.RNG_SEED = -1
_C.OUTPUT_PATH = ''
_C.MODUL... | 7,737 | 36.201923 | 108 | py |
VLC-BERT | VLC-BERT-master/aokvqa/function/__init__.py | 0 | 0 | 0 | py | |
VLC-BERT | VLC-BERT-master/aokvqa/function/train.py | import os
import pprint
import shutil
import inspect
from tensorboardX import SummaryWriter
import numpy as np
import torch
import torch.nn
import torch.optim as optim
import torch.distributed as distributed
from torch.nn.parallel import DistributedDataParallel as DDP
from common.utils.create_logger import create_log... | 17,600 | 51.228487 | 147 | py |
VLC-BERT | VLC-BERT-master/aokvqa/modules/resnet_vlbert_for_aokvqa.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from external.pytorch_pretrained_bert import BertTokenizer
from external.pytorch_pretrained_bert.modeling import BertPredictionHeadTransform
from common.module import Module
from common.fast_rcnn import FastRCNN
from common.visual_linguistic_b... | 22,529 | 50.674312 | 156 | py |
VLC-BERT | VLC-BERT-master/aokvqa/modules/__init__.py | from .resnet_vlbert_for_aokvqa import ResNetVLBERT
| 53 | 12.5 | 50 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/__init__.py | 0 | 0 | 0 | py | |
VLC-BERT | VLC-BERT-master/aokvqa/data/collate_batch.py | import torch
from common.utils.clip_pad import *
class BatchCollator(object):
def __init__(self, dataset, append_ind=False):
self.dataset = dataset
self.test_mode = self.dataset.test_mode
self.data_names = self.dataset.data_names
self.append_ind = append_ind
def __call__(self,... | 2,295 | 37.266667 | 115 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/build.py | import torch.utils.data
from .datasets import *
from . import samplers
from .transforms.build import build_transforms
from .collate_batch import BatchCollator
import pprint
DATASET_CATALOGS = {'aokvqa': AOKVQA}
def build_dataset(dataset_name, *args, **kwargs):
assert dataset_name in DATASET_CATALOGS, "dataset n... | 4,753 | 44.27619 | 106 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/datasets/aokvqa.py | import os
import json
import _pickle as cPickle
from PIL import Image
import re
import base64
import numpy as np
import csv
import sys
import time
import logging
import pickle5 as pickle
import torch
from torch.utils.data import Dataset
from external.pytorch_pretrained_bert import BertTokenizer
from common.utils.zipr... | 21,774 | 42.812877 | 171 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/datasets/__init__.py | from .aokvqa import AOKVQA
| 28 | 8.666667 | 26 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/samplers/grouped_batch_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import torch
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
class GroupedBatchSampler(BatchSampler):
"""
Wraps another sampler to yield a mini-batch of indices.
It enfo... | 4,846 | 40.42735 | 88 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/samplers/distributed.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed.
# FIXME remove this once c10d fixes the bug it has
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class DistributedSampler(S... | 2,568 | 37.924242 | 86 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/samplers/__init__.py | from .distributed import DistributedSampler
from .grouped_batch_sampler import GroupedBatchSampler
| 100 | 24.25 | 54 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/transforms/__init__.py | from .transforms import Compose
from .transforms import Resize
from .transforms import RandomHorizontalFlip
from .transforms import ToTensor
from .transforms import Normalize
from .build import build_transforms
| 212 | 25.625 | 44 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/transforms/build.py | from . import transforms as T
def build_transforms(cfg, mode='train'):
assert mode in ['train', 'test', 'val']
min_size = cfg.SCALES[0]
max_size = cfg.SCALES[1]
assert min_size <= max_size
if mode == 'train':
flip_prob = cfg.TRAIN.FLIP_PROB
elif mode == 'test':
flip_prob = cfg... | 1,034 | 23.069767 | 85 | py |
VLC-BERT | VLC-BERT-master/aokvqa/data/transforms/transforms.py | import random
import numpy as np
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, boxes, masks, im_info, flipped):
for t in self.transforms:
... | 4,104 | 30.821705 | 97 | py |
VLC-BERT | VLC-BERT-master/external/PythonEvaluationTools/aokvqa_vqaEval.py | import argparse
import json
import os
def load_aokvqa(aokvqa_dir, split, version='v1p0'):
#assert split in ['train', 'val', 'test', 'test_w_ans', 'val_pruned']
dataset = json.load(open(
os.path.join(aokvqa_dir, f"aokvqa_{version}_{split}.json")
))
return dataset
def get_coco_path(split, image_... | 2,667 | 31.536585 | 93 | py |
VLC-BERT | VLC-BERT-master/external/PythonEvaluationTools/okvqa_vqaEval.py | # coding: utf-8
import argparse
import json
from external.PythonEvaluationTools.vqaEval import VQAEval
from external.PythonEvaluationTools.vqa_helper import VQA
def run_eval(resFile=None, save_path=None, pruned=False):
# set up file names and paths
taskType = 'OpenEnded'
dataType = 'mscoco'
dataSubTy... | 3,266 | 39.8375 | 136 | py |
VLC-BERT | VLC-BERT-master/external/PythonEvaluationTools/vqaEval.py | # coding=utf-8
__author__='aagrawal'
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/eval.py).
import sys
import re
class VQAEval:
def __init__(self, vqa, vqaRes, n=2):
self.n =... | 8,197 | 43.075269 | 156 | py |
VLC-BERT | VLC-BERT-master/external/PythonEvaluationTools/vqa_helper.py | __author__ = 'aagrawal'
__version__ = '0.9'
# Interface for accessing the VQA dataset.
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py).
# The following functions are defined:... | 8,063 | 43.552486 | 242 | py |
VLC-BERT | VLC-BERT-master/external/PythonEvaluationTools/__init__.py | 0 | 0 | 0 | py | |
VLC-BERT | VLC-BERT-master/external/pytorch_pretrained_bert/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENS... | 6,803 | 40.742331 | 116 | py |