enable offline mode
binding_affinity.py +12 -7
binding_affinity.py
CHANGED
@@ -46,11 +46,12 @@ _LICENSE = "BSD two-clause"
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URL = "https://huggingface.co/datasets/jglaser/binding_affinity/resolve/main/
-
-
-
-
+_URL = "https://huggingface.co/datasets/jglaser/binding_affinity/resolve/main/"
+
+_file_names = {'default': 'data/all.parquet',
+               'no_kras': 'data/all_nokras.parquet'}
+
+_URLs = {name: _URL+_file_names[name] for name in _file_names}
 
 
 # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
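For context (not part of the commit): with the definitions above, the dict comprehension resolves `_URLs` to one download URL per configuration, equivalent to the literal mapping below.

```python
# Equivalent literal value of _URLs, derived from _URL and _file_names above
_URLs = {
    'default': 'https://huggingface.co/datasets/jglaser/binding_affinity/resolve/main/data/all.parquet',
    'no_kras': 'https://huggingface.co/datasets/jglaser/binding_affinity/resolve/main/data/all_nokras.parquet',
}
```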
@@ -124,8 +125,12 @@ class BindingAffinity(datasets.ArrowBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        files =
-
+        files = _file_names
+        try:
+            files = dl_manager.download_and_extract(_URLs)
+        except:
+            pass
+
         return [
             datasets.SplitGenerator(
                 # These kwargs will be passed to _generate_examples
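A minimal usage sketch of the offline fallback this commit introduces. The load calls, the `HF_DATASETS_OFFLINE` variable, and the local path below are illustrative assumptions, not part of the script itself:

```python
# Sketch only: how the try/except fallback in _split_generators is expected to behave.
import datasets

# Online: dl_manager.download_and_extract(_URLs) succeeds, so `files` holds
# paths to the cached parquet downloads.
ds = datasets.load_dataset("jglaser/binding_affinity")

# Offline (e.g. HF_DATASETS_OFFLINE=1 or no network): the download raises,
# the bare `except` swallows the error, and `files` keeps the relative paths
# from _file_names ('data/all.parquet', ...), so generation can proceed from a
# local checkout of the dataset repo that already contains those parquet files.
# "/path/to/local/binding_affinity" is a hypothetical placeholder.
ds_local = datasets.load_dataset("/path/to/local/binding_affinity")
```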