code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1
value | license stringclasses 15
values | size int64 3 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urllib2

from django.http import HttpResponse, HttpResponseNotAllowed, HttpResponseServerError
from django.shortcuts import render
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt

from chatbots.models import BotMessage
def index(request):
    """Placeholder view for the app root."""
    greeting = "Hello, world. You're at the polls index."
    return HttpResponse(greeting)
@csrf_exempt
def pong(request):
    """Render the index template with a unicode smoke-test context."""
    # Example of persisting a BotMessage, kept for reference:
    # p = BotMessage(message_text="What's your name?", intent_text="check_botinfo", intent_arg1="None", intent_arg2="None", intent_arg3="None", pub_date=timezone.now())
    # p.save()
    context = {
        'unicode_test': 'ทดสอบภาษาไทย',
        'welcome_title': 'Hello, django',
    }
    return render(request, "index.html", context)
def bot(request):
    """List every stored BotMessage on the bot page."""
    messages = BotMessage.objects.all()
    context = {
        'welcome_title': 'This is bot messages:',
        'bot_messages': messages,
    }
    return render(request, "bot.html", context)
@csrf_exempt
def botcallback(request):
    """LINE webhook endpoint: reply with a test message for a POSTed event.

    Returns an HttpResponse echoing the reply token, or a 405 response for
    any method other than POST.
    """
    if request.method != 'POST':
        # Bug fix: the 405 response was previously constructed (with a wrong
        # argument) but never returned, so non-POST requests fell through.
        return HttpResponseNotAllowed(['POST'])
    token = _getTokenFromRequestBody(request.body)
    data = json.dumps({
        "replyToken": token,
        "messages": [{
            "type": "text",
            "text": "test"
        }]
    })
    _postback(data, token)
    return HttpResponse("token: %s" % token)
def _getTokenFromRequestBody(body):
try:
json_request = json.loads(body)
events_obj = json_request['events']
event_obj = events_obj[0]
token = event_obj['replyToken']
return token
except KeyError:
HttpResponseServerError("Malformed data!")
def _postback(data, token):
    """POST an already-serialized JSON reply body to the LINE reply API.

    ``data`` must be a JSON string (the caller serializes it); ``token`` is
    the channel access token used for the Bearer Authorization header.
    """
    url = 'https://api.line.me/v2/bot/message/reply'
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    # Bug fix: `data` is already a JSON string -- dumping it again would
    # double-encode the body and the LINE API would reject it.
    request = urllib2.Request(url, data=data)
    request.add_header("Content-Type", "application/json; charset=UTF-8")
    request.add_header("Authorization", "Bearer %s" % token)
    opener.open(request)
| pongem/python-bot-project | appengine/standard/botapp/chatbots/views.py | Python | apache-2.0 | 2,535 |
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
from __future__ import absolute_import
from __future__ import print_function
import socket
# ============= local library imports ==========================
from six.moves.socketserver import BaseRequestHandler
class MessagingHandler(BaseRequestHandler):
    """Abstract request handler: reads one packet, asks the owning server
    for a response, sends it back, and updates the server's statistics.

    Transport-specific subclasses must implement get_packet/send_packet.
    """
    # When True, every received/sent packet is logged via the server.
    _verbose = False
    def handle(self):
        """Handle a single request: receive, respond, update counters.
        """
        data = self.get_packet()
        if data is not None:
            if self._verbose:
                self.server.info('Received: %s' % data.strip())
            # client_address is normally (host, port); fall back to the raw
            # value for address families that are not indexable tuples.
            try:
                addr = self.client_address[0]
            except IndexError:
                addr = self.client_address
            response = self.server.get_response(self.server.processor_type, data, addr)
            if response is not None:
                self.send_packet(response)
                if 'ERROR 6' in response:
                    # 'ERROR 6' marks a repeater failure tracked by the server.
                    self.server.increment_repeater_fails()
                #
                if self._verbose:
                    self.server.info('Sent: %s' % response.strip())
                self.server.parent.cur_rpacket = data
                if len(response) > 20:
                    # Truncate for display only; the full response was already sent.
                    response = '{}...'.format(response[:20])
                self.server.parent.cur_spacket = response
            self.server.increment_packets_received()
            self.server.increment_packets_sent()
    def get_packet(self):
        """Read and return one raw packet (transport-specific).
        """
        raise NotImplementedError
    def send_packet(self, resp):
        """Send *resp* back to the client (transport-specific).
        """
        raise NotImplementedError
    def _response_blocks(self, resp, blocksize=1024):
        # Yield *resp* in fixed-size chunks of at most `blocksize` items.
        s = 0
        n = len(resp)
        while s < n:
            yield resp[s:s + blocksize]
            s += blocksize
    def _send_packet(self, response, send):
        # Terminate the response with a newline, then push it out through
        # *send* (a socket send callable) block by block.
        response = '{}\n'.format(response)
        mlen = len(response)
        totalsent = 0
        gen = self._response_blocks(response)
        while totalsent < mlen:
            try:
                msg = next(gen)
            except StopIteration:
                break
            try:
                totalsent += send(msg)
                # totalsent += sock.sendto(msg, self.client_address)
                #print 'totalsent={} total={}'.format(totalsent, mlen)
            except socket.error as e:
                # NOTE(review): a chunk that fails to send is dropped, not
                # retried -- confirm this best-effort behavior is intended.
                print('exception', e)
                continue
# ============= EOF ====================================
| UManPychron/pychron | pychron/messaging/handlers/messaging_handler.py | Python | apache-2.0 | 3,181 |
from monkey_island.cc.services.config_schema.config_schema_per_attack_technique import (
ConfigSchemaPerAttackTechnique,
)
# Expected result of reversing the `fake_schema` fixture: a mapping from
# ATT&CK technique id to {definition/property path: [config option names]}.
REVERSE_FAKE_SCHEMA = {
    "T0000": {
        "Definition Type 1": ["Config Option 1", "Config Option 2"],
        "Definition Type 2": ["Config Option 5", "Config Option 6"],
        "Property Type 1 -> Category 1": ["Config Option 1"],
        "Property Type 2 -> Category 1": ["Config Option 1"],
        "Property Type 2 -> Category 2 -> Config Option 1": ["Config Option 1.1"],
        "Property Type 2 -> Category 2 -> Config Option 2": ["Config Option 2.1"],
        "Property Type 2 -> Category 2 -> Config Option 2 -> Config Option 2.1": [
            "Config Option 2.1.1"
        ],
    },
    "T0001": {"Definition Type 1": ["Config Option 1"], "Definition Type 2": ["Config Option 5"]},
}
def test_get_config_schema_per_attack_technique(monkeypatch, fake_schema):
    """Reversing the fake schema must yield the hand-built expectation."""
    per_technique = ConfigSchemaPerAttackTechnique().get_config_schema_per_attack_technique(
        fake_schema
    )
    assert per_technique == REVERSE_FAKE_SCHEMA
| guardicore/monkey | monkey/tests/unit_tests/monkey_island/cc/services/config_schema/test_config_schema_per_attack_technique.py | Python | gpl-3.0 | 1,051 |
# coding=utf-8
import re
import time
import datetime
import requests
import logging
import base64
from urllib import parse
from lib.mp.base import Base
class QuTouTiao(Base):
    """Publishing adapter for the Qutoutiao (趣头条) self-media platform.

    Wraps the mpapi.qutoutiao.net HTTP API: login, article publishing,
    read statistics, article status queries and image upload.
    """
    # Platform id used by the surrounding framework.
    mp_id = 13
    # Human-readable (Chinese) platform name.
    zh_name = '趣头条'
    @staticmethod
    def login(user, pswd, **kw):
        """Log in with phone number and password.

        Returns ({'token': <token>}, nickname) on success.
        NOTE(review): returns a bare {} (no nickname tuple) when the login
        response carries no token -- callers must handle both shapes.
        """
        session = requests.session()
        resp = session.get(
            'https://mpapi.qutoutiao.net/member/login?email=&telephone={}&password={}&keep=&captcha=&source=1&k='
            '&dtu=200'.format(user, pswd)
        ).json()
        token = resp['data']['token']
        if not token:
            return {}
        # Verify the token by fetching the member profile.
        req = requests.get(
            'https://mpapi.qutoutiao.net/member/getMemberInfo?token={}&dtu=200'.format(token)
        ).json()
        if req['code'] != 0:
            raise Exception('login fail')
        logging.error('登录成功')
        name = req['data']['nickname']
        return dict(token=token), name
    def publish(self, title, content, category, flag=1):
        """Publish an HTML article.

        Uses the first <img> in *content* as the cover. Returns
        (status, cause): status 2 = submitted, 3 = failed.
        """
        token = self.session.cookies['token']
        status = 3
        ariticle_id = None
        cause = ''
        self.logger.info("")
        # Collect all image URLs from the article body; the first one
        # doubles as the cover image.
        result = re.compile(r'<img.*?src="(.*?)".*?>', re.S).findall(content)
        if not result:
            cause = "请上传文章封面"
            self.logger.error(cause)
            return status, cause
        # Download the cover and re-upload it as a base64 data URI.
        coves = self.session.get(result[0])
        img_data = 'data:image/jpeg;base64,' + base64.b64encode(coves.content).decode()
        post_cove = self.session.post('https://qupload.qutoutiao.net/api/v1.0/image/uploadBase64',
                                      data={
                                          'token': token,
                                          'action': 'image',
                                          'upfile': img_data,
                                          'dtu': 200}).json()
        cover = post_cove['data']['path']
        home = self.session.get(
            'https://mpapi.qutoutiao.net/member/getMemberInfo?token={}&dtu=200'.format(token)
        ).json()
        user_id = home['data']['member_id']
        # Client-generated id tying the draft/check/publish calls together.
        flag_id = 'article' + user_id + str(int(time.time() * 1000))
        # Resolve the category display name to the platform's category id.
        tag = self.session.get(
            'https://mpapi.qutoutiao.net/content/getCategoryList?token={}&dtu=200'.format(token)
        ).json()
        for item in tag['data']:
            if item['name'] == category:
                category = item['cates'][-1]
        data = {
            'id': '',
            'category': 22,
            'two_level_category': '[%s]' % category,
            'cover_type': 1,
            'cover': '["{}"]'.format(cover),
            'title': title,
            'detail': content,
            'image_list': '["{}"]'.format(result[0].split('com/')[-1]),
            'is_delay': 0,
            'is_origin': 1,
            'tag': '教育,人工智能,鲸媒体',
            'code': '',
            'flag_id': flag_id,
            'token': token,
            'dtu': 200,
        }
        # Duplicate-title pre-check required by the platform before saving.
        self.session.get('https://mpapi.qutoutiao.net/content/checkTitle?'
                         'title={}&flag_id={}'
                         '&type=content&token={}&dtu=200'.format(title, flag_id, token))
        resp = self.session.post('https://mpapi.qutoutiao.net/content/saveNew', data=data).json()
        try:
            content_id = resp['data']['content_id']
        except Exception as e:
            QuTouTiao.logger.error(e)
            return 3, resp['message']
        self.session.post('https://mpapi.qutoutiao.net/content/setLocalChannel',
                          data={
                              'content_id': content_id,
                              'dtu': 200,
                              'local_channel_list': [],
                              'token': token
                          })
        # status=5 moves the saved draft into the publishing queue.
        change_url = 'https://mpapi.qutoutiao.net/content/changeStatus?id={}&status=5' \
                     '&flag_id={}&token={}&dtu=200'.format(content_id, flag_id, token)
        resp = self.session.get(change_url).json()
        logging.error(resp)
        if resp['code'] == 0:
            status = 2
            return status, ''
        else:
            status = 3
            return status, resp['message']
    def read_count(self,
                   start_day=datetime.datetime.now() - datetime.timedelta(days=7),
                   end_day=datetime.datetime.now() - datetime.timedelta(days=1)):
        """Fetch daily read statistics for the given date range.

        NOTE(review): the default dates are evaluated once at import time,
        so a long-running process will keep using stale defaults.
        """
        token = self.session.cookies['token']
        url = 'https://mpapi.qutoutiao.net/report/brief?start_date={}&end_date={}' \
              '&submemberid=&token={}&dtu=200'.format(
            start_day.strftime('%Y-%m-%d'), end_day.strftime('%Y-%m-%d'), token)
        resp = self.session.get(url).json()['data']['data']['daily']
        read_list = []
        for item in resp:
            read_list.append(dict(
                recomment_num=item['list_pv'],
                read_num=item['pv'],
                share_num=item['share_num'],
                day_time=item['event_day']
            ))
        return read_list
    def fetch_article_status(self, title):
        """Look up an article by exact title on the first page of the content list.

        Returns (status, cause, url): 4 = published (url set),
        3 = rejected (cause set), 2 = not found.
        """
        res = [2, '没查询到该文章', '']
        token = self.session.cookies['token']
        resp = self.session.get('https://mpapi.qutoutiao.net/content/getList?status=&page=1&title=&submemberid='
                                '&nickname=&start_date=''&end_date=&isMotherMember=false&token={}&dtu=200'
                                .format(token)).json()['data']['data']
        for art in resp:
            if title != art['title']:
                continue
            elif art['status'] == '2':
                url = art['url']
                res = 4, '', url
            elif art['status'] == '3':
                res = 3, art['reason'], ''
        return res
    def check_user_cookies(self):
        """Return True when the stored token still authenticates."""
        token = self.session.cookies['token']
        req = self.session.get(
            'https://mpapi.qutoutiao.net/member/getMemberInfo?token={}&dtu=200'.format(token)
        ).json()
        if req['code'] != 0:
            return False
        return True
    def upload_image(self, image_name, image_data):
        """Upload raw image bytes via the editor endpoint; return the public URL."""
        files = {
            'upfile': (image_name, image_data, 'image/jpeg', {
                'id': 'WU_FILE_0', 'name': image_name, 'type': 'image/jpeg',
                "lastModifiedDate": 'Tue Jun 12 2018 19:22:30 GMT+0800 (中国标准时间)',
                'size': ''})}
        resp = self.session.post('https://editer2.1sapp.com/ueditor/php/controller.php?action=uploadimage&encode=utf-8',
                                 files=files).json()
        base_url = 'http://static.1sapp.com/image'
        img_url = base_url + resp['url']
        return img_url
| Svolcano/python_exercise | spiders/qutoutiao.py | Python | mit | 6,694 |
import os
import argparse
import datetime
import yaml
import api.src.common.initial_environment_config
from ..models.dense import create_model
from ..data_processing.data_generator import DataGenerator
from ..common.config import TrainingConfig, DataConfig, Config
from ..common.utils import print_info, ensure_dir
from .plot_trainings import get_description_string
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, LearningRateScheduler, EarlyStopping
RUNNING_TIME = datetime.datetime.now().strftime("%H_%M_%d_%m_%y")
def train(num_epochs, batch_size, input_size, num_workers):
    """Build the dense model and train it with generators.

    Unless Config.NO_SAVE is set, also creates a timestamped model directory
    containing checkpoints, the training log, a change note, the frozen
    config, and the model summary.

    NOTE(review): num_workers is accepted but never used here -- confirm
    whether it should be forwarded to fit_generator.
    """
    if not Config.NO_SAVE:
        ensure_dir(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME))
    # Input is a flat 2592-dimensional feature vector.
    model = create_model((2592,))
    callbacks = [
        ModelCheckpoint(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'weights.h5'), save_best_only=True, monitor=TrainingConfig.callbacks_monitor),
        CSVLogger(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'history.csv')),
        LearningRateScheduler(TrainingConfig.schedule),
        EarlyStopping(patience=5)
    ]if not Config.NO_SAVE else []
    if not Config.NO_SAVE:
        # Record, for reproducibility: what changed, the configs, the model.
        introduced_change = input("What new was introduced?: ")
        with open(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'change.txt'), 'w') as f:
            f.write(introduced_change)
        with open(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'config.yml'), 'w') as f:
            yaml.dump(list([TrainingConfig.get_config(), Config.get_config(), DataConfig.get_config()]), f, default_flow_style=False)
        with open(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'model.txt'), 'w') as f:
            f.write(get_description_string(model))
    optimizer = TrainingConfig.optimizer
    # Validation generator shuffles differently (flags per DataGenerator API).
    data_generator_train = DataGenerator(DataConfig.PATHS['TRAINING_PROCESSED_DATA'], batch_size, input_size, False, True)
    data_generator_valid = DataGenerator(DataConfig.PATHS['VALID_PROCESSED_DATA'], batch_size, input_size, True, True)
    model.compile(optimizer, TrainingConfig.loss, metrics=TrainingConfig.metrics)
    model.fit_generator(data_generator_train, samples_per_epoch=data_generator_train.samples_per_epoch, nb_epoch=num_epochs,
                        validation_data=data_generator_valid, nb_val_samples=data_generator_valid.samples_per_epoch,
                        callbacks=callbacks)
def main(cli_args):
    """Entry point: run a training session from parsed CLI arguments."""
    print_info("Training")
    train(
        cli_args.num_epochs,
        cli_args.batch_size,
        cli_args.input_size,
        cli_args.num_workers,
    )
    print_info("Finished")
if __name__ == '__main__':
    # CLI wrapper: every argument defaults to the project config values.
    argparser = argparse.ArgumentParser(description='Script performing training')
    argparser.add_argument('--num_epochs', default=TrainingConfig.NB_EPOCHS, type=int, help='Number of training epochs')
    argparser.add_argument('--num_workers', type=int, default=TrainingConfig.NUM_WORKERS, help='Number of workers during training')
    argparser.add_argument('--batch_size', type=int, default=TrainingConfig.BATCH_SIZE, help='Batch size')
    argparser.add_argument('--input_size', type=int, default=Config.IMAGE_SIZE, help='Image size to input')
    arguments = argparser.parse_args()
    main(arguments)
| kacper1095/asl-translator | api/src/scripts/train_simple_network.py | Python | gpl-3.0 | 3,211 |
"""
cryptography.py
Author: Roger Danilek
Credit: Nils
Assignment:
Write and submit a program that encrypts and decrypts user data.
See the detailed requirements at https://github.com/HHS-IntroProgramming/Cryptography/blob/master/README.md
"""
associations = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 .,:;'\"/\\<>(){}[]-=_+?!"
la = 1000*associations
from itertools import cycle
a = ""
while a != "q":
a = input("Enter e to encrypt, d to decrypt, or q to quit: ")
if a=="e":
encrypting = input("Message: ")
jumbler = input("Key: ")
e_list = []
j_list= []
for x in encrypting:
e_list.append(associations.find(x))
for x in jumbler:
j_list.append(associations.find(x))
if len(e_list) > len(j_list):
combined_list = list(zip(e_list, cycle(j_list)))
else:
combined_list = list(zip(cycle(e_list), j_list))
esum = [x + y for x, y in combined_list]
for x in esum:
print(la[x], end='')
print("")
print("")
elif a=="d":
decrypting = input("Message: ")
jumbler = input("Key: ")
d_list = []
j_list= []
for x in decrypting:
d_list.append(associations.find(x))
for x in jumbler:
j_list.append(associations.find(x))
if len(d_list) > len(j_list):
combined_list = list(zip(d_list, cycle(j_list)))
else:
combined_list = list(zip(cycle(d_list), j_list))
dsum = [x - y for x, y in combined_list]
for x in dsum:
print(la[x], end='')
print("")
print("")
elif a =="q":
print("Goodbye!")
else:
print("Did not understand command, try again.")
#associations.find(char)
#associations[index] | RDanilek/Cryptography | cryptography.py | Python | mit | 1,838 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the ``UserData`` model with a unique
    (user, data_kind) constraint. The ``models`` dict below is South's
    auto-generated frozen ORM snapshot -- do not edit it by hand."""
    def forwards(self, orm):
        """Apply: create the dingos_userdata table and its unique index."""
        # Adding model 'UserData'
        db.create_table(u'dingos_userdata', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('data_kind', self.gf('django.db.models.fields.SlugField')(max_length=32)),
            ('identifier', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dingos.Identifier'], null=True)),
        ))
        db.send_create_signal(u'dingos', ['UserData'])
        # Adding unique constraint on 'UserData', fields ['user', 'data_kind']
        db.create_unique(u'dingos_userdata', ['user_id', 'data_kind'])
    def backwards(self, orm):
        """Revert: drop the unique index first, then the table."""
        # Removing unique constraint on 'UserData', fields ['user', 'data_kind']
        db.delete_unique(u'dingos_userdata', ['user_id', 'data_kind'])
        # Deleting model 'UserData'
        db.delete_table(u'dingos_userdata')
    # Frozen ORM state at the time of this migration (South-generated).
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'dingos.blobstorage': {
            'Meta': {'object_name': 'BlobStorage'},
            'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sha256': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
        },
        u'dingos.datatypenamespace': {
            'Meta': {'object_name': 'DataTypeNameSpace'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
            'uri': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'})
        },
        u'dingos.fact': {
            'Meta': {'object_name': 'Fact'},
            'fact_term': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.FactTerm']"}),
            'fact_values': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.FactValue']", 'null': 'True', 'symmetrical': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'value_iobject_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'value_of_set'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}),
            'value_iobject_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        u'dingos.factdatatype': {
            'Meta': {'unique_together': "(('name', 'namespace'),)", 'object_name': 'FactDataType'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kind': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_data_type_set'", 'to': u"orm['dingos.DataTypeNameSpace']"})
        },
        u'dingos.factterm': {
            'Meta': {'unique_together': "(('term', 'attribute'),)", 'object_name': 'FactTerm'},
            'attribute': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'term': ('django.db.models.fields.CharField', [], {'max_length': '512'})
        },
        u'dingos.factterm2type': {
            'Meta': {'unique_together': "(('iobject_type', 'fact_term'),)", 'object_name': 'FactTerm2Type'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'fact_data_types': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'fact_term_thru'", 'symmetrical': 'False', 'to': u"orm['dingos.FactDataType']"}),
            'fact_term': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_thru'", 'to': u"orm['dingos.FactTerm']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_term_thru'", 'to': u"orm['dingos.InfoObjectType']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
        },
        u'dingos.factvalue': {
            'Meta': {'unique_together': "(('value', 'fact_data_type', 'storage_location'),)", 'object_name': 'FactValue'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'fact_data_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_value_set'", 'to': u"orm['dingos.FactDataType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'storage_location': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        u'dingos.identifier': {
            'Meta': {'unique_together': "(('uid', 'namespace'),)", 'object_name': 'Identifier'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latest': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'latest_of'", 'unique': 'True', 'null': 'True', 'to': u"orm['dingos.InfoObject']"}),
            'namespace': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.IdentifierNameSpace']"}),
            'uid': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
        },
        u'dingos.identifiernamespace': {
            'Meta': {'object_name': 'IdentifierNameSpace'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
            'uri': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'})
        },
        u'dingos.infoobject': {
            'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('identifier', 'timestamp'),)", 'object_name': 'InfoObject'},
            'create_timestamp': ('django.db.models.fields.DateTimeField', [], {}),
            'facts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.Fact']", 'through': u"orm['dingos.InfoObject2Fact']", 'symmetrical': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.Identifier']"}),
            'iobject_family': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.InfoObjectFamily']"}),
            'iobject_family_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['dingos.Revision']"}),
            'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.InfoObjectType']"}),
            'iobject_type_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['dingos.Revision']"}),
            'name': ('django.db.models.fields.CharField', [], {'default': "'Unnamed'", 'max_length': '255', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
            'uri': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        u'dingos.infoobject2fact': {
            'Meta': {'ordering': "['node_id__name']", 'object_name': 'InfoObject2Fact'},
            'attributed_fact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'null': 'True', 'to': u"orm['dingos.InfoObject2Fact']"}),
            'fact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_thru'", 'to': u"orm['dingos.Fact']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iobject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_thru'", 'to': u"orm['dingos.InfoObject']"}),
            'node_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.NodeID']"})
        },
        u'dingos.infoobjectfamily': {
            'Meta': {'object_name': 'InfoObjectFamily'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '256'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
        },
        u'dingos.infoobjectnaming': {
            'Meta': {'ordering': "['position']", 'object_name': 'InfoObjectNaming'},
            'format_string': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'to': u"orm['dingos.InfoObjectType']"}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
        },
        u'dingos.infoobjecttype': {
            'Meta': {'unique_together': "(('name', 'iobject_family', 'namespace'),)", 'object_name': 'InfoObjectType'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iobject_family': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'to': u"orm['dingos.InfoObjectFamily']"}),
            'name': ('django.db.models.fields.SlugField', [], {'max_length': '30'}),
            'namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'blank': 'True', 'to': u"orm['dingos.DataTypeNameSpace']"})
        },
        u'dingos.marking2x': {
            'Meta': {'object_name': 'Marking2X'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'marking': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'marked_item_thru'", 'to': u"orm['dingos.InfoObject']"}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        u'dingos.nodeid': {
            'Meta': {'object_name': 'NodeID'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        u'dingos.relation': {
            'Meta': {'unique_together': "(('source_id', 'target_id', 'relation_type'),)", 'object_name': 'Relation'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'metadata_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}),
            'relation_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.Fact']"}),
            'source_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'yields_via'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}),
            'target_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'yielded_by_via'", 'null': 'True', 'to': u"orm['dingos.Identifier']"})
        },
        u'dingos.revision': {
            'Meta': {'object_name': 'Revision'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
        },
        u'dingos.userdata': {
            'Meta': {'unique_together': "(('user', 'data_kind'),)", 'object_name': 'UserData'},
            'data_kind': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.Identifier']", 'null': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        }
    }
    complete_apps = ['dingos']
from PyQt4.QtCore import Qt
from nodebox.graphics.qt import *
from nodebox.graphics import qt
from nodebox.util import _copy_attr, _copy_attrs
# Re-export everything the qt graphics module exports, plus our Context class.
__all__ = list(qt.__all__)
__all__.extend(['Context'])
class Context(object):
KEY_UP = Qt.Key_Up
KEY_DOWN = Qt.Key_Down
KEY_LEFT = Qt.Key_Left
KEY_RIGHT = Qt.Key_Right
KEY_BACKSPACE = Qt.Key_Backspace
def __init__(self, canvas=None, ns=None):
"""Initializes the context.
Note that we have to give the namespace of the executing script,
which is a hack to keep the WIDTH and HEIGHT properties updated.
Python's getattr only looks up property values once: at assign time."""
if canvas is None:
canvas = Canvas()
if ns is None:
ns = {}
self.canvas = canvas
self._ns = ns
self._imagecache = {}
self._vars = []
self._resetContext()
def _resetContext(self):
self._outputmode = RGB
self._colormode = RGB
self._colorrange = 1.0
self._fillcolor = self.Color()
self._strokecolor = None
self._strokewidth = 1.0
self.canvas.background = self.Color(1.0)
self._path = None
self._autoclosepath = True
self._transform = Transform()
self._transformmode = CENTER
self._transformstack = []
self._fontname = "Helvetica"
self._fontsize = 24
self._lineheight = 1.2
self._align = LEFT
self._noImagesHint = False
self._oldvars = self._vars
self._vars = []
def ximport(self, libName):
lib = __import__(libName)
self._ns[libName] = lib
lib._ctx = self
return lib
### Setup methods ###
def size(self, width, height):
self.canvas.width = width
self.canvas.height = height
self._ns["WIDTH"] = width
self._ns["HEIGHT"] = height
def _get_width(self):
return self.canvas.width
WIDTH = property(_get_width)
def _get_height(self):
return self.canvas.height
HEIGHT = property(_get_height)
def speed(self, speed):
self.canvas.speed = speed
def background(self, *args):
if len(args) > 0:
if len(args) == 1 and args[0] is None:
self.canvas.background = None
else:
self.canvas.background = self.Color(args)
return self.canvas.background
def outputmode(self, mode=None):
if mode is not None:
self._outputmode = mode
return self._outputmode
### Variables ###
def var(self, name, type, default=None, min=0, max=100, value=None):
v = Variable(name, type, default, min, max, value)
v = self.addvar(v)
    def addvar(self, v):
        """Register Variable *v* with the context.

        If a variable with the same name existed in the previous run and
        is compatible (``compliesTo``), its value is carried over so that
        re-running the script keeps user-adjusted values.  The variable's
        current value is also published into the script namespace.
        """
        oldvar = self.findvar(v.name)
        if oldvar is not None:
            if oldvar.compliesTo(v):
                v.value = oldvar.value
        self._vars.append(v)
        self._ns[v.name] = v.value
def findvar(self, name):
for v in self._oldvars:
if v.name == name:
return v
return None
### Objects ####
def _makeInstance(self, clazz, args, kwargs):
"""Creates an instance of a class defined in this document.
This method sets the context of the object to the current context."""
inst = clazz(self, *args, **kwargs)
return inst
def BezierPath(self, *args, **kwargs):
return self._makeInstance(BezierPath, args, kwargs)
def ClippingPath(self, *args, **kwargs):
return self._makeInstance(ClippingPath, args, kwargs)
def Rect(self, *args, **kwargs):
return self._makeInstance(Rect, args, kwargs)
def Oval(self, *args, **kwargs):
return self._makeInstance(Oval, args, kwargs)
def Color(self, *args, **kwargs):
return self._makeInstance(Color, args, kwargs)
def Image(self, *args, **kwargs):
return self._makeInstance(Image, args, kwargs)
def Text(self, *args, **kwargs):
return self._makeInstance(Text, args, kwargs)
### Primitives ###
def rect(self, x, y, width, height, roundness=0.0, draw=True, **kwargs):
BezierPath.checkKwargs(kwargs)
if roundness == 0:
p = self.BezierPath(**kwargs)
p.rect(x, y, width, height)
else:
curve = min(width*roundness, height*roundness)
p = self.BezierPath(**kwargs)
p.moveto(x, y+curve)
p.curveto(x, y, x, y, x+curve, y)
p.lineto(x+width-curve, y)
p.curveto(x+width, y, x+width, y, x+width, y+curve)
p.lineto(x+width, y+height-curve)
p.curveto(x+width, y+height, x+width, y+height, x+width-curve, y+height)
p.lineto(x+curve, y+height)
p.curveto(x, y+height, x, y+height, x, y+height-curve)
p.closepath()
p.inheritFromContext(kwargs.keys())
if draw:
p.draw()
return p
def oval(self, x, y, width, height, draw=True, **kwargs):
BezierPath.checkKwargs(kwargs)
path = self.BezierPath(**kwargs)
path.oval(x, y, width, height)
path.inheritFromContext(kwargs.keys())
if draw:
path.draw()
return path
def line(self, x1, y1, x2, y2, draw=True, **kwargs):
BezierPath.checkKwargs(kwargs)
p = self.BezierPath(**kwargs)
p.line(x1, y1, x2, y2)
p.inheritFromContext(kwargs.keys())
if draw:
p.draw()
return p
def star(self, startx, starty, points=20, outer= 100, inner = 50, draw=True, **kwargs):
BezierPath.checkKwargs(kwargs)
from math import sin, cos, pi
p = self.BezierPath(**kwargs)
p.moveto(startx, starty + outer)
for i in range(1, int(2 * points)):
angle = i * pi / points
x = sin(angle)
y = cos(angle)
if i % 2:
radius = inner
else:
radius = outer
x = startx + radius * x
y = starty + radius * y
p.lineto(x,y)
p.closepath()
p.inheritFromContext(kwargs.keys())
if draw:
p.draw()
return p
def arrow(self, x, y, width=100, type=NORMAL, draw=True, **kwargs):
"""Draws an arrow.
Draws an arrow at position x, y, with a default width of 100.
There are two different types of arrows: NORMAL and trendy FORTYFIVE degrees arrows.
When draw=False then the arrow's path is not ended, similar to endpath(draw=False)."""
BezierPath.checkKwargs(kwargs)
if type==NORMAL:
return self._arrow(x, y, width, draw, **kwargs)
elif type==FORTYFIVE:
return self._arrow45(x, y, width, draw, **kwargs)
else:
raise NodeBoxError("arrow: available types for arrow() are NORMAL and FORTYFIVE\n")
def _arrow(self, x, y, width, draw, **kwargs):
head = width * .4
tail = width * .2
p = self.BezierPath(**kwargs)
p.moveto(x, y)
p.lineto(x-head, y+head)
p.lineto(x-head, y+tail)
p.lineto(x-width, y+tail)
p.lineto(x-width, y-tail)
p.lineto(x-head, y-tail)
p.lineto(x-head, y-head)
p.lineto(x, y)
p.closepath()
p.inheritFromContext(kwargs.keys())
if draw:
p.draw()
return p
def _arrow45(self, x, y, width, draw, **kwargs):
head = .3
tail = 1 + head
p = self.BezierPath(**kwargs)
p.moveto(x, y)
p.lineto(x, y+width*(1-head))
p.lineto(x-width*head, y+width)
p.lineto(x-width*head, y+width*tail*.4)
p.lineto(x-width*tail*.6, y+width)
p.lineto(x-width, y+width*tail*.6)
p.lineto(x-width*tail*.4, y+width*head)
p.lineto(x-width, y+width*head)
p.lineto(x-width*(1-head), y)
p.lineto(x, y)
p.inheritFromContext(kwargs.keys())
if draw:
p.draw()
return p
### Path Commands ###
def beginpath(self, x=None, y=None):
self._path = self.BezierPath()
self._pathclosed = False
if x != None and y != None:
self._path.moveto(x,y)
def moveto(self, x, y):
if self._path is None:
raise NodeBoxError, "No current path. Use beginpath() first."
self._path.moveto(x,y)
def lineto(self, x, y):
if self._path is None:
raise NodeBoxError, "No current path. Use beginpath() first."
self._path.lineto(x, y)
def curveto(self, x1, y1, x2, y2, x3, y3):
if self._path is None:
raise NodeBoxError, "No current path. Use beginpath() first."
self._path.curveto(x1, y1, x2, y2, x3, y3)
def closepath(self):
if self._path is None:
raise NodeBoxError, "No current path. Use beginpath() first."
if not self._pathclosed:
self._path.closepath()
def endpath(self, draw=True):
if self._path is None:
raise NodeBoxError, "No current path. Use beginpath() first."
if self._autoclosepath:
self.closepath()
p = self._path
p.inheritFromContext()
if draw:
p.draw()
self._path = None
self._pathclosed = False
return p
def drawpath(self, path, **kwargs):
BezierPath.checkKwargs(kwargs)
if isinstance(path, (list, tuple)):
path = self.BezierPath(path, **kwargs)
else: # Set the values in the current bezier path with the kwargs
for arg_key, arg_val in kwargs.items():
setattr(path, arg_key, _copy_attr(arg_val))
path.inheritFromContext(kwargs.keys())
path.draw()
def autoclosepath(self, close=True):
self._autoclosepath = close
def findpath(self, points, curvature=1.0):
import bezier
path = bezier.findpath(points, curvature=curvature)
path._ctx = self
path.inheritFromContext()
return path
### Clipping Commands ###
def beginclip(self, path):
cp = self.ClippingPath(path)
self.canvas.push(cp)
return cp
def endclip(self):
self.canvas.pop()
### Transformation Commands ###
def push(self):
self._transformstack.insert(0, self._transform.matrix)
def pop(self):
try:
self._transform = Transform(self._transformstack[0])
del self._transformstack[0]
except IndexError, e:
raise NodeBoxError, "pop: too many pops!"
def transform(self, mode=None):
if mode is not None:
self._transformmode = mode
return self._transformmode
def translate(self, x, y):
self._transform.translate(x, y)
def reset(self):
self._transform = Transform()
def rotate(self, degrees=0, radians=0):
self._transform.rotate(-degrees,-radians)
def translate(self, x=0, y=0):
self._transform.translate(x,y)
def scale(self, x=1, y=None):
self._transform.scale(x,y)
def skew(self, x=0, y=0):
self._transform.skew(x,y)
### Color Commands ###
color = Color
def colormode(self, mode=None, range=None):
if mode is not None:
self._colormode = mode
if range is not None:
self._colorrange = float(range)
return self._colormode
def colorrange(self, range=None):
if range is not None:
self._colorrange = float(range)
return self._colorrange
def nofill(self):
self._fillcolor = None
def fill(self, *args):
if len(args) > 0:
self._fillcolor = self.Color(*args)
return self._fillcolor
def nostroke(self):
self._strokecolor = None
def stroke(self, *args):
if len(args) > 0:
self._strokecolor = self.Color(*args)
return self._strokecolor
def strokewidth(self, width=None):
if width is not None:
self._strokewidth = max(width, 0.0001)
return self._strokewidth
### Font Commands ###
def font(self, fontname=None, fontsize = None):
if fontname is not None:
fnt, exists = Text.font_exists(fontname)
if not exists:
raise NodeBoxError, 'Font "%s" not found.' % fontname
else:
self._fontname = fontname
if fontsize is not None:
self._fontsize = fontsize
return self._fontname
def fontsize(self, fontsize=None):
if fontsize is not None:
self._fontsize = fontsize
return self._fontsize
def lineheight(self, lineheight=None):
if lineheight is not None:
self._lineheight = max(lineheight, 0.01)
return self._lineheight
def align(self, align=None):
if align is not None:
self._align = align
return self._align
def textwidth(self, txt, width=None, **kwargs):
"""Calculates the width of a single-line string."""
return self.textmetrics(txt, width, **kwargs)[0]
def textheight(self, txt, width=None, **kwargs):
"""Calculates the height of a (probably) multi-line string."""
return self.textmetrics(txt, width, **kwargs)[1]
def text(self, txt, x, y, width=None, height=None, outline=False, draw=True, **kwargs):
Text.checkKwargs(kwargs)
txt = self.Text(txt, x, y, width, height, **kwargs)
txt.inheritFromContext(kwargs.keys())
if outline:
path = txt.path
if draw:
path.draw()
return path
else:
if draw:
txt.draw()
return txt
def textpath(self, txt, x, y, width=None, height=None, **kwargs):
Text.checkKwargs(kwargs)
txt = self.Text(txt, x, y, width, height, **kwargs)
txt.inheritFromContext(kwargs.keys())
return txt.path
def textmetrics(self, txt, width=None, height=None, **kwargs):
txt = self.Text(txt, 0, 0, width, height, **kwargs)
txt.inheritFromContext(kwargs.keys())
return txt.metrics
### Image commands ###
def image(self, path, x, y, width=None, height=None, alpha=1.0, data=None, draw=True, **kwargs):
img = self.Image(path, x, y, width, height, alpha, data=data, **kwargs)
img.inheritFromContext(kwargs.keys())
if draw:
img.draw()
return img
def imagesize(self, path, data=None):
img = self.Image(path, data=data)
return img.size
### Canvas proxy ###
def save(self, fname, format=None):
self.canvas.save(fname, format)
| gt-ros-pkg/rcommander-core | nodebox_qt/src/nodebox/graphics/__init__.py | Python | bsd-3-clause | 14,849 |
from bs4 import BeautifulSoup
from django.template import Context, Template
from ws.tests import TestCase, factories
class DiscountTagsTest(TestCase):
    """Tests for rendering the ``active_discounts`` template tag."""

    @staticmethod
    def _render_tag(participant):
        # Shared template snippet exercised by both tests.
        template = Template(
            '{% load discount_tags %}{% active_discounts participant%}'
        )
        return template.render(Context({'participant': participant}))

    def test_no_discounts(self):
        # A participant with no discounts renders to nothing.
        par = factories.ParticipantFactory.create()
        self.assertFalse(self._render_tag(par).strip())

    def test_discounts(self):
        par = factories.ParticipantFactory.create()
        local_gym = factories.DiscountFactory.create(
            name="Local Gym", url='example.com/gym'
        )
        big_retailer = factories.DiscountFactory.create(
            name="Large Retailer", url='example.com/retail'
        )
        # A discount the participant has *not* signed up for; it must not
        # appear in the rendered output.
        factories.DiscountFactory.create(name="Other Outing Club")
        par.discounts.add(local_gym)
        par.discounts.add(big_retailer)
        soup = BeautifulSoup(self._render_tag(par), 'html.parser')
        self.assertEqual(
            soup.find('p').get_text(' ', strip=True),
            'You are sharing your name, email address, and membership status with the following companies:',
        )
        self.assertEqual(
            [str(li) for li in soup.find('ul').find_all('li')],
            [
                '<li><a href="example.com/gym">Local Gym</a></li>',
                '<li><a href="example.com/retail">Large Retailer</a></li>',
            ],
        )
        self.assertTrue(
            soup.find('a', text='discount preferences', href="/preferences/discounts/")
        )
| DavidCain/mitoc-trips | ws/tests/templatetags/test_discount_tags.py | Python | gpl-3.0 | 1,775 |
from . import empty
from serializers import groups
import aggregators
def create_recursive():
    """Build a Recursive serializer group wired with the timeflow header,
    description and data visitors defined below."""
    obj = groups.Group(RecursiveHeader(), RecursiveDescription(), RecursiveData())
    return groups.Recursive(obj)
class RecursiveHeader(empty.StageVisitor):
    """Column names for the per-element timeflow features of a stage."""

    def row_for(self, stage_class):
        # Double dispatch: the stage class selects the matching case_* method.
        return stage_class.visit_class(self)

    def row(self):
        return ['timeflow_angle', 'timeflow_length', 'timeflow_vector_angle']

    def _generic_case(self, stage_class):
        return self.row()

    # Three stage types share the generic three-column header.
    case_present_past_future = _generic_case
    case_seasons_of_year = _generic_case
    case_days_of_week = _generic_case

    def case_parts_of_day(self, stage_class):
        # Parts-of-day exposes a single arc measure instead.
        return ['timeflow_arc']
class RecursiveDescription(empty.StageVisitor):
    """Human-readable (Spanish) descriptions for the columns produced by
    RecursiveHeader; same double-dispatch structure."""
    def row_for(self, stage_class):
        # The stage class routes to the matching case_* method below.
        return stage_class.visit_class(self)
    def row(self):
        # Descriptions for the three generic timeflow columns.
        return [
            'Ángulo entre el elemento y el siguiente (cronológico)',
            'Distancia entre el elemento y el siguiente (cronológico)',
            'Ángulo entre el vector anterior y siguiente al elemento (cronológico)'
        ]
    def case_present_past_future(self, stage_class):
        return self.row()
    def case_seasons_of_year(self, stage_class):
        return self.row()
    def case_days_of_week(self, stage_class):
        return self.row()
    def case_parts_of_day(self, stage_class):
        # Parts-of-day only has the single wrap-around arc column.
        return [
            'Grados entre el elemento y el siguiente (cronológico)'
        ]
class RecursiveData(empty.StageVisitor):
    """Per-element timeflow feature rows, caching the Timeflow aggregator
    across consecutive calls for the same stage."""

    def __init__(self):
        # Last stage for which self.timeflow was built; None = no cache yet.
        self.stage = None

    def row_for_element(self, stage, element):
        """Return the feature row for *element* of *stage*."""
        self.element = element
        return stage.visit(self)

    def row(self, stage):
        # Bug fix: the guard compared self.stage but never assigned it, so
        # the cache never hit and a new Timeflow was built on every call.
        # Remember the stage so the aggregator is reused.
        if self.stage != stage:
            self.timeflow = aggregators.Timeflow(stage)
            self.stage = stage
        return [
            self.timeflow.angle_each()[self.element],
            self.timeflow.length_each()[self.element],
            self.timeflow.vector_angle_each()[self.element]
        ]

    def case_present_past_future(self, stage):
        return self.row(stage)

    def case_seasons_of_year(self, stage):
        return self.row(stage)

    def case_days_of_week(self, stage):
        return self.row(stage)

    def case_parts_of_day(self, stage):
        # Parts-of-day uses a specialised aggregator with a wrap-around
        # distance measure instead of the three generic features.
        timeflow_pod = aggregators.PartsOfDayTimeflow(stage)
        return [
            timeflow_pod.wrap_distance_each()[self.element]
        ]
| alepulver/my-thesis | results-tables/serializers/stage/time_flow.py | Python | mit | 2,501 |
#!/usr/bin/env python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import email.parser
import itertools
import math
import random
import time
import unittest
from collections import defaultdict
from contextlib import contextmanager
import json
from hashlib import md5
import mock
from eventlet import Timeout
from six import BytesIO
from six.moves import range
import swift
from swift.common import utils, swob, exceptions
from swift.common.exceptions import ChunkWriteTimeout
from swift.common.utils import Timestamp
from swift.proxy import server as proxy_server
from swift.proxy.controllers import obj
from swift.proxy.controllers.base import \
get_container_info as _real_get_container_info
from swift.common.storage_policy import POLICIES, ECDriverError, \
StoragePolicy, ECStoragePolicy
from test.unit import FakeRing, FakeMemcache, fake_http_connect, \
debug_logger, patch_policies, SlowBody, FakeStatus, \
DEFAULT_TEST_EC_TYPE, encode_frag_archive_bodies, make_ec_object_stub, \
fake_ec_node_response, StubResponse
from test.unit.proxy.test_server import node_error_count
def unchunk_body(chunked_body):
    """Reassemble an HTTP chunked-transfer-encoded body into a plain string.

    Each chunk is "<hex length>\\r\\n<data>\\r\\n"; the terminating
    zero-length chunk contributes nothing to the output.
    """
    pieces = []
    rest = chunked_body
    while rest:
        size_hex, rest = rest.split('\r\n', 1)
        size = int(size_hex, 16)
        pieces.append(rest[:size])
        # Skip the chunk data plus its trailing CRLF.
        rest = rest[size + 2:]
    return ''.join(pieces)
@contextmanager
def set_http_connect(*args, **kwargs):
    """Temporarily replace ``http_connect`` in every proxy controller
    module with a ``fake_http_connect`` stub built from *args*/*kwargs*.

    After the ``with`` body runs (but only on the success path — this is
    before the ``finally``), asserts that every canned status code was
    consumed; leftovers mean the request made fewer backend calls than
    the test expected.  The original connect functions are always
    restored on exit.
    """
    old_connect = swift.proxy.controllers.base.http_connect
    new_connect = fake_http_connect(*args, **kwargs)
    try:
        # Patch every controller module that holds its own reference.
        swift.proxy.controllers.base.http_connect = new_connect
        swift.proxy.controllers.obj.http_connect = new_connect
        swift.proxy.controllers.account.http_connect = new_connect
        swift.proxy.controllers.container.http_connect = new_connect
        yield new_connect
        left_over_status = list(new_connect.code_iter)
        if left_over_status:
            raise AssertionError('left over status %r' % left_over_status)
    finally:
        swift.proxy.controllers.base.http_connect = old_connect
        swift.proxy.controllers.obj.http_connect = old_connect
        swift.proxy.controllers.account.http_connect = old_connect
        swift.proxy.controllers.container.http_connect = old_connect
class PatchedObjControllerApp(proxy_server.Application):
    """
    This patch is just a hook over the proxy server's __call__ to ensure
    that calls to get_container_info will return the stubbed value for
    container_info if it's a container info call.
    """

    # Default stubbed container info, shared by all containers unless a
    # per-container entry exists in per_container_info.
    container_info = {}
    # Optional mapping of container name -> info dict for tests that need
    # different containers to report different info.
    per_container_info = {}

    def __call__(self, *args, **kwargs):
        def _fake_get_container_info(env, app, swift_source=None):
            _vrs, account, container, _junk = utils.split_path(
                env['PATH_INFO'], 3, 4)

            # Seed the cache with our container info so that the real
            # get_container_info finds it.
            ic = env.setdefault('swift.infocache', {})
            cache_key = "container/%s/%s" % (account, container)

            old_value = ic.get(cache_key)

            # Copy the container info so we don't hand out a reference to a
            # mutable thing that's set up only once at compile time. Nothing
            # *should* mutate it, but it's better to be paranoid than wrong.
            if container in self.per_container_info:
                ic[cache_key] = self.per_container_info[container].copy()
            else:
                ic[cache_key] = self.container_info.copy()

            real_info = _real_get_container_info(env, app, swift_source)

            # Restore the cache to its pre-call state so the stub doesn't
            # leak into later lookups in the same request environment.
            if old_value is None:
                del ic[cache_key]
            else:
                ic[cache_key] = old_value

            return real_info

        with mock.patch('swift.proxy.server.get_container_info',
                        new=_fake_get_container_info), \
                mock.patch('swift.proxy.controllers.base.get_container_info',
                           new=_fake_get_container_info):
            return super(
                PatchedObjControllerApp, self).__call__(*args, **kwargs)
def make_footers_callback(body=None):
    """Build a footers callback that injects fake footer metadata.

    When *body* is given, the 'Etag' footer is its MD5 hex digest;
    otherwise it is None.
    """
    checksum = md5(body).hexdigest() if body is not None else None
    fake_footers = {
        'X-Object-Sysmeta-Container-Update-Override-Etag':
            'container update etag may differ',
        'X-Object-Sysmeta-Crypto-Etag': '20242af0cd21dd7195a10483eb7472c9',
        'X-Object-Sysmeta-Crypto-Meta-Etag':
            '{"cipher": "AES_CTR_256", "iv": "sD+PSw/DfqYwpsVGSo0GEw=="}',
        'X-I-Feel-Lucky': 'Not blocked',
        'Etag': checksum,
    }

    def footers_callback(footers):
        footers.update(fake_footers)
    return footers_callback
class BaseObjectControllerMixin(object):
container_info = {
'status': 200,
'write_acl': None,
'read_acl': None,
'storage_policy': None,
'sync_key': None,
'versions': None,
}
# this needs to be set on the test case
controller_cls = None
    def setUp(self):
        """Build a proxy app with fake rings, memcache and logging, and a
        stubbed container_info (see PatchedObjControllerApp)."""
        # setup fake rings with handoffs
        for policy in POLICIES:
            policy.object_ring.max_more_nodes = policy.object_ring.replicas
        self.logger = debug_logger('proxy-server')
        self.logger.thread_locals = ('txn1', '127.0.0.2')
        self.app = PatchedObjControllerApp(
            None, FakeMemcache(), account_ring=FakeRing(),
            container_ring=FakeRing(), logger=self.logger)
        # you can over-ride the container_info just by setting it on the app
        # (see PatchedObjControllerApp for details)
        self.app.container_info = dict(self.container_info)
        # default policy and ring references
        self.policy = POLICIES.default
        self.obj_ring = self.policy.object_ring
        # Monotonically increasing timestamp supply for requests; consumed
        # one at a time via ts().
        self._ts_iter = (utils.Timestamp(t) for t in
                         itertools.count(int(time.time())))
    def ts(self):
        """Return the next monotonically-increasing Timestamp."""
        return next(self._ts_iter)
    def replicas(self, policy=None):
        """Replica count of the given (or default) policy's object ring."""
        policy = policy or POLICIES.default
        return policy.object_ring.replicas
    def quorum(self, policy=None):
        """Quorum size for the given (or default) storage policy."""
        policy = policy or POLICIES.default
        return policy.quorum
def test_iter_nodes_local_first_noops_when_no_affinity(self):
# this test needs a stable node order - most don't
self.app.sort_nodes = lambda l, *args, **kwargs: l
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy = self.policy
self.app.get_policy_options(policy).write_affinity_is_local_fn = None
object_ring = policy.object_ring
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1))
self.maxDiff = None
self.assertEqual(all_nodes, local_first_nodes)
def test_iter_nodes_local_first_moves_locals_first(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
# we'll write to one more than replica count local nodes
policy_conf.write_affinity_node_count_fn = lambda r: r + 1
object_ring = self.policy.object_ring
# make our fake ring have plenty of nodes, and not get limited
# artificially by the proxy max request node count
object_ring.max_more_nodes = 100000
# nothing magic about * 2 + 3, just a way to make it bigger
self.app.request_node_count = lambda r: r * 2 + 3
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
# limit to the number we're going to look at in this request
nodes_requested = self.app.request_node_count(object_ring.replicas)
all_nodes = all_nodes[:nodes_requested]
# make sure we have enough local nodes (sanity)
all_local_nodes = [n for n in all_nodes if
policy_conf.write_affinity_is_local_fn(n)]
self.assertGreaterEqual(len(all_local_nodes), self.replicas() + 1)
# finally, create the local_first_nodes iter and flatten it out
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1))
# the local nodes move up in the ordering
self.assertEqual([1] * (self.replicas() + 1), [
node['region'] for node in local_first_nodes[
:self.replicas() + 1]])
# we don't skip any nodes
self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
def test_iter_nodes_local_first_best_effort(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
object_ring = self.policy.object_ring
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1))
# we won't have quite enough local nodes...
self.assertEqual(len(all_nodes), self.replicas() +
POLICIES.default.object_ring.max_more_nodes)
all_local_nodes = [n for n in all_nodes if
policy_conf.write_affinity_is_local_fn(n)]
self.assertEqual(len(all_local_nodes), self.replicas())
# but the local nodes we do have are at the front of the local iter
first_n_local_first_nodes = local_first_nodes[:len(all_local_nodes)]
self.assertEqual(sorted(all_local_nodes),
sorted(first_n_local_first_nodes))
# but we *still* don't *skip* any nodes
self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
def test_connect_put_node_timeout(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
req = swift.common.swob.Request.blank('/v1/a/c/o')
self.app.conn_timeout = 0.05
with set_http_connect(slow_connect=True):
nodes = [dict(ip='', port='', device='')]
res = controller._connect_put_node(nodes, '', req, {}, ('', ''))
self.assertIsNone(res)
def test_DELETE_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas()
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_missing_one(self):
# Obviously this test doesn't work if we're testing 1 replica.
# In that case, we don't have any failovers to check.
if self.replicas() == 1:
return
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [404] + [204] * (self.replicas() - 1)
random.shuffle(codes)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_not_found(self):
# Obviously this test doesn't work if we're testing 1 replica.
# In that case, we don't have any failovers to check.
if self.replicas() == 1:
return
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [404] * (self.replicas() - 1) + [204]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_DELETE_mostly_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
mostly_204s = [204] * self.quorum()
codes = mostly_204s + [404] * (self.replicas() - len(mostly_204s))
self.assertEqual(len(codes), self.replicas())
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_mostly_not_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
mostly_404s = [404] * self.quorum()
codes = mostly_404s + [204] * (self.replicas() - len(mostly_404s))
self.assertEqual(len(codes), self.replicas())
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_DELETE_half_not_found_statuses(self):
self.obj_ring.set_replicas(4)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(404, 204, 404, 204):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_half_not_found_headers_and_body(self):
# Transformed responses have bogus bodies and headers, so make sure we
# send the client headers and body from a real node's response.
self.obj_ring.set_replicas(4)
status_codes = (404, 404, 204, 204)
bodies = ('not found', 'not found', '', '')
headers = [{}, {}, {'Pick-Me': 'yes'}, {'Pick-Me': 'yes'}]
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(*status_codes, body_iter=bodies,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('Pick-Me'), 'yes')
self.assertEqual(resp.body, '')
def test_DELETE_handoff(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas()
with set_http_connect(507, *codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_POST_non_int_delete_after(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-After', resp.body)
def test_PUT_non_int_delete_after(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-After', resp.body)
def test_POST_negative_delete_after(self):
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '-60'})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-After in past', resp.body)
def test_PUT_negative_delete_after(self):
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '-60'})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-After in past', resp.body)
def test_POST_delete_at_non_integer(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-At', resp.body)
def test_PUT_delete_at_non_integer(self):
t = str(int(time.time() - 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-At', resp.body)
def test_POST_delete_at_in_past(self):
t = str(int(time.time() - 100))
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-At in past', resp.body)
def test_PUT_delete_at_in_past(self):
t = str(int(time.time() - 100))
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-At in past', resp.body)
def test_HEAD_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(200):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
def test_HEAD_x_newest(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
with set_http_connect(*([200] * self.replicas())):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_HEAD_x_newest_different_timestamps(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
timestamps = [next(ts) for i in range(self.replicas())]
newest_timestamp = timestamps[-1]
random.shuffle(timestamps)
backend_response_headers = [{
'X-Backend-Timestamp': t.internal,
'X-Timestamp': t.normal
} for t in timestamps]
with set_http_connect(*([200] * self.replicas()),
headers=backend_response_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-timestamp'], newest_timestamp.normal)
def test_HEAD_x_newest_with_two_vector_timestamps(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(time.time(), offset=offset)
for offset in itertools.count())
timestamps = [next(ts) for i in range(self.replicas())]
newest_timestamp = timestamps[-1]
random.shuffle(timestamps)
backend_response_headers = [{
'X-Backend-Timestamp': t.internal,
'X-Timestamp': t.normal
} for t in timestamps]
with set_http_connect(*([200] * self.replicas()),
headers=backend_response_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-backend-timestamp'],
newest_timestamp.internal)
def test_HEAD_x_newest_with_some_missing(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
request_count = self.app.request_node_count(self.obj_ring.replicas)
backend_response_headers = [{
'x-timestamp': next(ts).normal,
} for i in range(request_count)]
responses = [404] * (request_count - 1)
responses.append(200)
request_log = []
def capture_requests(ip, port, device, part, method, path,
headers=None, **kwargs):
req = {
'ip': ip,
'port': port,
'device': device,
'part': part,
'method': method,
'path': path,
'headers': headers,
}
request_log.append(req)
with set_http_connect(*responses,
headers=backend_response_headers,
give_connect=capture_requests):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
for req in request_log:
self.assertEqual(req['method'], 'HEAD')
self.assertEqual(req['path'], '/a/c/o')
def test_container_sync_delete(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
req = swob.Request.blank(
'/v1/a/c/o', method='DELETE', headers={
'X-Timestamp': next(ts).internal})
codes = [409] * self.obj_ring.replicas
ts_iter = itertools.repeat(next(ts).internal)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 409)
def test_PUT_requires_length(self):
    """A PUT without a Content-Length header gets 411 Length Required."""
    put_req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
    resp = put_req.get_response(self.app)
    self.assertEqual(411, resp.status_int)
def test_container_update_backend_requests(self):
    """Check how many backend PUTs carry container-update headers.

    With container replicas at or below the object quorum, updates are
    'filled' up to quorum + 1 object servers; with more container
    replicas, up to one update per object replica is assigned.
    """
    for policy in POLICIES:
        req = swift.common.swob.Request.blank(
            '/v1/a/c/o', method='PUT',
            headers={'Content-Length': '0',
                     'X-Backend-Storage-Policy-Index': int(policy)})
        controller = self.controller_cls(self.app, 'a', 'c', 'o')
        # This is the number of container updates we're doing, simulating
        # 1 to 15 container replicas.
        for num_containers in range(1, 16):
            containers = [{'ip': '1.0.0.%s' % i,
                           'port': '60%s' % str(i).zfill(2),
                           'device': 'sdb'} for i in range(num_containers)]
            backend_headers = controller._backend_requests(
                req, self.replicas(policy), 1, containers)
            # how many of the backend headers have a container update
            container_updates = len(
                [headers for headers in backend_headers
                 if 'X-Container-Partition' in headers])
            if num_containers <= self.quorum(policy):
                # filling case
                expected = min(self.quorum(policy) + 1,
                               self.replicas(policy))
            else:
                # container updates >= object replicas
                expected = min(num_containers,
                               self.replicas(policy))
            self.assertEqual(container_updates, expected)
def _check_write_affinity(
        self, conf, policy_conf, policy, affinity_regions, affinity_count):
    """Build an app with the given write-affinity config and verify that
    iter_nodes_local_first yields *affinity_count* nodes from
    *affinity_regions* before any others.

    Returns the constructed app so callers can assert further.
    """
    conf['policy_config'] = policy_conf
    app = PatchedObjControllerApp(
        conf, FakeMemcache(), account_ring=FakeRing(),
        container_ring=FakeRing(), logger=self.logger)
    controller = self.controller_cls(app, 'a', 'c', 'o')
    object_ring = app.get_object_ring(int(policy))
    # make our fake ring have plenty of nodes, and not get limited
    # artificially by the proxy max request node count
    object_ring.max_more_nodes = 100
    all_nodes = object_ring.get_part_nodes(1)
    all_nodes.extend(object_ring.get_more_nodes(1))
    # make sure we have enough local nodes (sanity)
    all_local_nodes = [n for n in all_nodes if
                       n['region'] in affinity_regions]
    self.assertGreaterEqual(len(all_local_nodes), affinity_count)
    # finally, create the local_first_nodes iter and flatten it out
    local_first_nodes = list(controller.iter_nodes_local_first(
        object_ring, 1, policy))
    # check that the required number of local nodes were moved up the order
    node_regions = [node['region'] for node in local_first_nodes]
    self.assertTrue(
        all(r in affinity_regions for r in node_regions[:affinity_count]),
        'Unexpected region found in local nodes, expected %s but got %s' %
        (affinity_regions, node_regions))
    return app
def test_write_affinity_not_configured(self):
    """With no write affinity configured, regions 0 and 1 both appear."""
    # default is no write affinity so expect both regions 0 and 1
    for policy in (POLICIES[0], POLICIES[1]):
        self._check_write_affinity({}, {}, policy, [0, 1],
                                   2 * self.replicas(policy))
def test_write_affinity_proxy_server_config(self):
    """Policies without overrides use proxy-server section affinity."""
    # without overrides policies use proxy-server config section options
    conf = {'write_affinity_node_count': '1 * replicas',
            'write_affinity': 'r0'}
    for policy in (POLICIES[0], POLICIES[1]):
        self._check_write_affinity(conf, {}, policy, [0],
                                   self.replicas(policy))
def test_write_affinity_per_policy_config(self):
    """Per-policy affinity settings suffice with no global config."""
    # check only per-policy configuration is sufficient
    policy_conf = {'0': {'write_affinity_node_count': '1 * replicas',
                         'write_affinity': 'r1'},
                   '1': {'write_affinity_node_count': '5',
                         'write_affinity': 'r0'}}
    # policy 0: all writes go to region 1; policy 1: five writes to region 0
    self._check_write_affinity({}, policy_conf, POLICIES[0], [1],
                               self.replicas(POLICIES[0]))
    self._check_write_affinity({}, policy_conf, POLICIES[1], [0], 5)
def test_write_affinity_per_policy_config_overrides_and_inherits(self):
    """Per-policy options beat the proxy-server section; unset options
    are inherited from it."""
    # check per-policy config is preferred over proxy-server section config
    conf = {'write_affinity_node_count': '1 * replicas',
            'write_affinity': 'r0'}
    policy_conf = {'0': {'write_affinity': 'r1'},
                   '1': {'write_affinity_node_count': '3 * replicas'}}
    # policy 0 inherits default node count, override affinity to r1
    self._check_write_affinity(conf, policy_conf, POLICIES[0], [1],
                               self.replicas(POLICIES[0]))
    # policy 1 inherits default affinity to r0, overrides node count
    self._check_write_affinity(conf, policy_conf, POLICIES[1], [0],
                               3 * self.replicas(POLICIES[1]))
# end of BaseObjectControllerMixin
@patch_policies()
class TestReplicatedObjController(BaseObjectControllerMixin,
unittest.TestCase):
controller_cls = obj.ReplicatedObjectController
def test_PUT_simple(self):
    """A zero-byte PUT succeeds when all three replicas return 201."""
    put_req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
    put_req.headers['content-length'] = '0'
    with set_http_connect(201, 201, 201):
        resp = put_req.get_response(self.app)
    self.assertEqual(201, resp.status_int)
def test_PUT_error_with_footers(self):
    """A footer-capable PUT still surfaces backend 503 failures."""
    footers_callback = make_footers_callback('')
    put_req = swift.common.swob.Request.blank(
        '/v1/a/c/o', method='PUT',
        environ={'swift.callback.update_footers': footers_callback})
    put_req.headers['content-length'] = '0'
    # every replica fails; expect the failure to propagate to the client
    statuses = [503] * self.replicas()
    with set_http_connect(*statuses,
                          expect_headers={'X-Obj-Metadata-Footer': 'yes'}):
        resp = put_req.get_response(self.app)
    self.assertEqual(503, resp.status_int)
def _test_PUT_with_no_footers(self, test_body='', chunked=False):
    """Verify a PUT with no footer callback sends a plain, single-part
    body to every object server (no MIME boundary, no footer headers)."""
    # verify that when no footers are required then the PUT uses a regular
    # single part body
    req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
                                          body=test_body)
    if chunked:
        req.headers['Transfer-Encoding'] = 'chunked'
    etag = md5(test_body).hexdigest()
    req.headers['Etag'] = etag

    put_requests = defaultdict(
        lambda: {'headers': None, 'chunks': [], 'connection': None})

    def capture_body(conn, chunk):
        # record every chunk written on each backend connection
        put_requests[conn.connection_id]['chunks'].append(chunk)
        put_requests[conn.connection_id]['connection'] = conn

    def capture_headers(ip, port, device, part, method, path, headers,
                        **kwargs):
        conn_id = kwargs['connection_id']
        put_requests[conn_id]['headers'] = headers

    codes = [201] * self.replicas()
    expect_headers = {'X-Obj-Metadata-Footer': 'yes'}
    resp_headers = {
        'Some-Header': 'Four',
        'Etag': '"%s"' % etag,
    }
    with set_http_connect(*codes, expect_headers=expect_headers,
                          give_send=capture_body,
                          give_connect=capture_headers,
                          headers=resp_headers):
        resp = req.get_response(self.app)
    self.assertEqual(resp.status_int, 201)
    # every replica must have been sent the same X-Timestamp
    timestamps = {captured_req['headers']['x-timestamp']
                  for captured_req in put_requests.values()}
    self.assertEqual(1, len(timestamps), timestamps)
    self.assertEqual(dict(resp.headers), {
        'Content-Type': 'text/html; charset=UTF-8',
        'Content-Length': '0',
        'Etag': etag,
        'Last-Modified': time.strftime(
            "%a, %d %b %Y %H:%M:%S GMT",
            time.gmtime(math.ceil(float(timestamps.pop())))),
    })
    for connection_id, info in put_requests.items():
        body = ''.join(info['chunks'])
        headers = info['headers']
        if chunked:
            body = unchunk_body(body)
            self.assertEqual('100-continue', headers['Expect'])
            self.assertEqual('chunked', headers['Transfer-Encoding'])
        else:
            self.assertNotIn('Transfer-Encoding', headers)
        if body:
            self.assertEqual('100-continue', headers['Expect'])
        else:
            self.assertNotIn('Expect', headers)
        # no footers required, so none of the MIME document machinery
        self.assertNotIn('X-Backend-Obj-Multipart-Mime-Boundary', headers)
        self.assertNotIn('X-Backend-Obj-Metadata-Footer', headers)
        self.assertNotIn('X-Backend-Obj-Multiphase-Commit', headers)
        self.assertEqual(etag, headers['Etag'])
        self.assertEqual(test_body, body)
        self.assertTrue(info['connection'].closed)
def test_PUT_with_chunked_body_and_no_footers(self):
    """No-footer PUT with a chunked, non-empty body."""
    self._test_PUT_with_no_footers(chunked=True, test_body='asdf')
def test_PUT_with_body_and_no_footers(self):
    """No-footer PUT with a regular, non-empty body."""
    self._test_PUT_with_no_footers(chunked=False, test_body='asdf')
def test_PUT_with_no_body_and_no_footers(self):
    """No-footer PUT with an empty body."""
    self._test_PUT_with_no_footers(chunked=False, test_body='')
def _test_PUT_with_footers(self, test_body=''):
    """Verify a PUT with a footer callback sends a chunked multipart MIME
    body whose second part carries the footer metadata."""
    # verify that when footers are required the PUT body is multipart
    # and the footers are appended
    footers_callback = make_footers_callback(test_body)
    env = {'swift.callback.update_footers': footers_callback}
    req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
                                          environ=env)
    req.body = test_body
    # send bogus Etag header to differentiate from footer value
    req.headers['Etag'] = 'header_etag'
    codes = [201] * self.replicas()
    expect_headers = {
        'X-Obj-Metadata-Footer': 'yes'
    }

    put_requests = defaultdict(
        lambda: {'headers': None, 'chunks': [], 'connection': None})

    def capture_body(conn, chunk):
        # record every chunk written on each backend connection
        put_requests[conn.connection_id]['chunks'].append(chunk)
        put_requests[conn.connection_id]['connection'] = conn

    def capture_headers(ip, port, device, part, method, path, headers,
                        **kwargs):
        conn_id = kwargs['connection_id']
        put_requests[conn_id]['headers'] = headers

    resp_headers = {
        'Etag': '"resp_etag"',
        # NB: ignored!
        'Some-Header': 'Four',
    }
    with set_http_connect(*codes, expect_headers=expect_headers,
                          give_send=capture_body,
                          give_connect=capture_headers,
                          headers=resp_headers):
        resp = req.get_response(self.app)
    self.assertEqual(resp.status_int, 201)
    # every replica must have been sent the same X-Timestamp
    timestamps = {captured_req['headers']['x-timestamp']
                  for captured_req in put_requests.values()}
    self.assertEqual(1, len(timestamps), timestamps)
    self.assertEqual(dict(resp.headers), {
        'Content-Type': 'text/html; charset=UTF-8',
        'Content-Length': '0',
        'Etag': 'resp_etag',
        'Last-Modified': time.strftime(
            "%a, %d %b %Y %H:%M:%S GMT",
            time.gmtime(math.ceil(float(timestamps.pop())))),
    })
    for connection_id, info in put_requests.items():
        body = unchunk_body(''.join(info['chunks']))
        headers = info['headers']
        boundary = headers['X-Backend-Obj-Multipart-Mime-Boundary']
        self.assertTrue(boundary is not None,
                        "didn't get boundary for conn %r" % (
                            connection_id,))
        self.assertEqual('chunked', headers['Transfer-Encoding'])
        self.assertEqual('100-continue', headers['Expect'])
        self.assertEqual('yes', headers['X-Backend-Obj-Metadata-Footer'])
        self.assertNotIn('X-Backend-Obj-Multiphase-Commit', headers)
        self.assertEqual('header_etag', headers['Etag'])
        # email.parser.FeedParser doesn't know how to take a multipart
        # message and boundary together and parse it; it only knows how
        # to take a string, parse the headers, and figure out the
        # boundary on its own.
        parser = email.parser.FeedParser()
        parser.feed(
            "Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" %
            boundary)
        parser.feed(body)
        message = parser.close()
        self.assertTrue(message.is_multipart())  # sanity check
        mime_parts = message.get_payload()
        # notice, no commit confirmation
        self.assertEqual(len(mime_parts), 2)
        obj_part, footer_part = mime_parts
        self.assertEqual(obj_part['X-Document'], 'object body')
        self.assertEqual(test_body, obj_part.get_payload())
        # validate footer metadata
        self.assertEqual(footer_part['X-Document'], 'object metadata')
        footer_metadata = json.loads(footer_part.get_payload())
        self.assertTrue(footer_metadata)
        expected = {}
        footers_callback(expected)
        self.assertDictEqual(expected, footer_metadata)
        self.assertTrue(info['connection'].closed)
def test_PUT_with_body_and_footers(self):
    """Footered PUT with a non-empty body."""
    self._test_PUT_with_footers(test_body='asdf')
def test_PUT_with_no_body_and_footers(self):
    """Footered PUT with an empty body."""
    self._test_PUT_with_footers()
def test_txn_id_logging_on_PUT(self):
    """Every line logged during a failed PUT carries the transaction id."""
    req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
    self.app.logger.txn_id = req.environ['swift.trans_id'] = 'test-txn-id'
    req.headers['content-length'] = '0'
    # we capture stdout since the debug log formatter prints the formatted
    # message to stdout
    stdout = BytesIO()
    # (100, Timeout()) fails while waiting for the final PUT status; the
    # two 503s fail the remaining replicas so the request errors overall
    with set_http_connect((100, Timeout()), 503, 503), \
            mock.patch('sys.stdout', stdout):
        resp = req.get_response(self.app)
    self.assertEqual(resp.status_int, 503)
    for line in stdout.getvalue().splitlines():
        self.assertIn('test-txn-id', line)
    self.assertIn('Trying to get final status of PUT to',
                  stdout.getvalue())
def test_PUT_empty_bad_etag(self):
    """A zero-byte PUT with a mismatched Etag returns 422 immediately."""
    req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
    req.headers['Content-Length'] = '0'
    req.headers['Etag'] = '"catbus"'
    # The 2-tuple here makes getexpect() return 422, not 100. For objects
    # that are >0 bytes, you get a 100 Continue and then a 422
    # Unprocessable Entity after sending the body. For zero-byte objects,
    # though, you get the 422 right away because no Expect header is sent
    # with zero-byte PUT. The second status in the tuple should not be
    # consumed, it's just there to make the FakeStatus treat the first as
    # an expect status, but we'll make it something other than a 422 so
    # that if it is consumed then the test should fail.
    codes = [FakeStatus((422, 200))
             for _junk in range(self.replicas())]
    with set_http_connect(*codes):
        resp = req.get_response(self.app)
    self.assertEqual(resp.status_int, 422)
def test_PUT_if_none_match(self):
    """PUT with If-None-Match: * succeeds when no replica objects."""
    put_req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
    put_req.headers['if-none-match'] = '*'
    put_req.headers['content-length'] = '0'
    with set_http_connect(201, 201, 201):
        resp = put_req.get_response(self.app)
    self.assertEqual(201, resp.status_int)
def test_PUT_if_none_match_denied(self):
    """A single 412 from a backend fails an If-None-Match: * PUT."""
    put_req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
    put_req.headers['if-none-match'] = '*'
    put_req.headers['content-length'] = '0'
    with set_http_connect(201, 412, 201):
        resp = put_req.get_response(self.app)
    self.assertEqual(412, resp.status_int)
def test_PUT_if_none_match_not_star(self):
    """Any If-None-Match value other than * is rejected with 400 before
    any backend request is made."""
    put_req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
    put_req.headers['if-none-match'] = 'somethingelse'
    put_req.headers['content-length'] = '0'
    # no statuses configured: the proxy must not contact any backend
    with set_http_connect():
        resp = put_req.get_response(self.app)
    self.assertEqual(400, resp.status_int)
def test_PUT_connect_exceptions(self):
    """Exercise error-limiting bookkeeping for PUT connection failures.

    Each failure mode (5xx status, connect exception/timeout, expect
    failure, final-response failure) should bump the error count of
    exactly the node that failed; a 507 jumps straight past the error
    suppression limit.
    """
    object_ring = self.app.get_object_ring(None)
    self.app.sort_nodes = lambda n, *args, **kwargs: n  # disable shuffle

    def test_status_map(statuses, expected):
        # reset error limiting so scenarios stay isolated
        self.app._error_limiting = {}
        req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
                                 body='test body')
        with set_http_connect(*statuses):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, expected)

    base_status = [201] * 3
    # test happy path
    test_status_map(list(base_status), 201)
    for i in range(3):
        self.assertEqual(node_error_count(
            self.app, object_ring.devs[i]), 0)
    # single node errors and test isolation
    for i in range(3):
        status_list = list(base_status)
        status_list[i] = 503
        test_status_map(status_list, 201)
        for j in range(3):
            self.assertEqual(node_error_count(
                self.app, object_ring.devs[j]), 1 if j == i else 0)
    # connect errors
    test_status_map((201, Timeout(), 201, 201), 201)
    self.assertEqual(node_error_count(
        self.app, object_ring.devs[1]), 1)
    test_status_map((Exception('kaboom!'), 201, 201, 201), 201)
    self.assertEqual(node_error_count(
        self.app, object_ring.devs[0]), 1)
    # expect errors
    test_status_map((201, 201, (503, None), 201), 201)
    self.assertEqual(node_error_count(
        self.app, object_ring.devs[2]), 1)
    test_status_map(((507, None), 201, 201, 201), 201)
    self.assertEqual(
        node_error_count(self.app, object_ring.devs[0]),
        self.app.error_suppression_limit + 1)
    # response errors
    test_status_map(((100, Timeout()), 201, 201), 201)
    self.assertEqual(
        node_error_count(self.app, object_ring.devs[0]), 1)
    test_status_map((201, 201, (100, Exception())), 201)
    self.assertEqual(
        node_error_count(self.app, object_ring.devs[2]), 1)
    test_status_map((201, (100, 507), 201), 201)
    self.assertEqual(
        node_error_count(self.app, object_ring.devs[1]),
        self.app.error_suppression_limit + 1)
def test_PUT_connect_exception_with_unicode_path(self):
    """A connect exception on a non-ascii path logs one decodable error
    line and the PUT still succeeds on the remaining nodes."""
    expected = 201
    statuses = (
        Exception('Connection refused: Please insert ten dollars'),
        201, 201, 201)
    req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
                             method='PUT',
                             body='life is utf-gr8')
    self.app.logger.clear()
    with set_http_connect(*statuses):
        resp = req.get_response(self.app)
    self.assertEqual(resp.status_int, expected)
    # exactly one error line, and it must contain the utf-8 path
    log_lines = self.app.logger.get_lines_for_level('error')
    self.assertFalse(log_lines[1:])
    self.assertIn('ERROR with Object server', log_lines[0])
    self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
    self.assertIn('re: Expect: 100-continue', log_lines[0])
def test_PUT_get_expect_errors_with_unicode_path(self):
    """Expect-phase failures (507/503) log one error for non-ascii paths."""
    cases = [
        ((201, (507, None), 201, 201),
         'ERROR Insufficient Storage'),
        ((201, (503, None), 201, 201),
         'ERROR 503 Expect: 100-continue From Object Server'),
    ]
    for statuses, expected_fragment in cases:
        req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
                                 method='PUT',
                                 body='life is utf-gr8')
        self.app.logger.clear()
        with set_http_connect(*statuses):
            resp = req.get_response(self.app)
        # the PUT still succeeds and exactly one error line is logged
        self.assertEqual(resp.status_int, 201)
        log_lines = self.app.logger.get_lines_for_level('error')
        self.assertFalse(log_lines[1:])
        self.assertIn(expected_fragment, log_lines[0])
def test_PUT_send_exception_with_unicode_path(self):
    """An exception raised while sending body data logs one decodable
    error line and the PUT still succeeds."""
    def do_test(exc):
        conns = set()

        def capture_send(conn, data):
            # raise on the second distinct connection seen
            conns.add(conn)
            if len(conns) == 2:
                raise exc

        req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
                                 method='PUT',
                                 body='life is utf-gr8')
        self.app.logger.clear()
        with set_http_connect(201, 201, 201, give_send=capture_send):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 201)
        log_lines = self.app.logger.get_lines_for_level('error')
        self.assertFalse(log_lines[1:])
        self.assertIn('ERROR with Object server', log_lines[0])
        self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
        self.assertIn('Trying to write to', log_lines[0])

    do_test(Exception('Exception while sending data on connection'))
    do_test(ChunkWriteTimeout())
def test_PUT_final_response_errors_with_unicode_path(self):
    """Failures while getting the final PUT status log one decodable
    error line; the PUT still succeeds on the remaining nodes."""
    def do_test(statuses):
        req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
                                 method='PUT',
                                 body='life is utf-gr8')
        self.app.logger.clear()
        with set_http_connect(*statuses):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 201)
        log_lines = self.app.logger.get_lines_for_level('error')
        self.assertFalse(log_lines[1:])
        return req, log_lines

    # exception while reading the final response
    req, log_lines = do_test((201, (100, Exception('boom')), 201))
    self.assertIn('ERROR with Object server', log_lines[0])
    self.assertIn(req.path.decode('utf-8'), log_lines[0])
    self.assertIn('Trying to get final status of PUT', log_lines[0])
    # timeout while reading the final response
    req, log_lines = do_test((201, (100, Timeout()), 201))
    self.assertIn('ERROR with Object server', log_lines[0])
    self.assertIn(req.path.decode('utf-8'), log_lines[0])
    self.assertIn('Trying to get final status of PUT', log_lines[0])
    # final 507 / 500 statuses
    req, log_lines = do_test((201, (100, 507), 201))
    self.assertIn('ERROR Insufficient Storage', log_lines[0])
    req, log_lines = do_test((201, (100, 500), 201))
    self.assertIn('ERROR 500 From Object Server', log_lines[0])
    self.assertIn(req.path.decode('utf-8'), log_lines[0])
def test_DELETE_errors(self):
    """Backend DELETE failures log exactly one error line each, for both
    ascii and non-ascii object paths."""
    # verify logged errors with and without non-ascii characters in path
    def do_test(path, statuses):
        req = swob.Request.blank('/v1' + path,
                                 method='DELETE',
                                 body='life is utf-gr8')
        self.app.logger.clear()
        with set_http_connect(*statuses):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 201)
        log_lines = self.app.logger.get_lines_for_level('error')
        self.assertFalse(log_lines[1:])
        return req, log_lines

    # a 500 from one node
    req, log_lines = do_test('/AUTH_kilroy/ascii/ascii',
                             (201, 500, 201, 201))
    self.assertIn('Trying to DELETE', log_lines[0])
    self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
    self.assertIn(' From Object Server', log_lines[0])
    req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89',
                             (201, 500, 201, 201))
    self.assertIn('Trying to DELETE', log_lines[0])
    self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
    self.assertIn(' From Object Server', log_lines[0])
    # a 507 from one node
    req, log_lines = do_test('/AUTH_kilroy/ascii/ascii',
                             (201, 507, 201, 201))
    self.assertIn('ERROR Insufficient Storage', log_lines[0])
    req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89',
                             (201, 507, 201, 201))
    self.assertIn('ERROR Insufficient Storage', log_lines[0])
    # a connection-level exception from one node
    req, log_lines = do_test('/AUTH_kilroy/ascii/ascii',
                             (201, Exception(), 201, 201))
    self.assertIn('Trying to DELETE', log_lines[0])
    self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
    self.assertIn('ERROR with Object server', log_lines[0])
    req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89',
                             (201, Exception(), 201, 201))
    self.assertIn('Trying to DELETE', log_lines[0])
    self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
    self.assertIn('ERROR with Object server', log_lines[0])
def test_PUT_error_during_transfer_data(self):
    """An IOError reading the request body yields 499 (client disconnect)."""
    class ExplodingReader(object):
        def read(self, size):
            raise IOError('error message')

    put_req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
                                 body='test body')
    put_req.environ['wsgi.input'] = ExplodingReader()
    put_req.headers['content-length'] = '6'
    with set_http_connect(201, 201, 201):
        resp = put_req.get_response(self.app)
    self.assertEqual(499, resp.status_int)
def test_PUT_chunkreadtimeout_during_transfer_data(self):
    """A ChunkReadTimeout reading the body yields 408 Request Timeout."""
    class SlowReader(object):
        def read(self, size):
            raise exceptions.ChunkReadTimeout()

    put_req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
                                 body='test body')
    put_req.environ['wsgi.input'] = SlowReader()
    put_req.headers['content-length'] = '6'
    with set_http_connect(201, 201, 201):
        resp = put_req.get_response(self.app)
    self.assertEqual(408, resp.status_int)
def test_PUT_timeout_during_transfer_data(self):
    """A Timeout reading the body yields 499 and closes every backend
    connection."""
    class TimingOutReader(object):
        def read(self, size):
            raise Timeout()

    # stash connections so that we can verify they all get closed
    captured_conns = []
    put_req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
                                 body='test body')
    put_req.environ['wsgi.input'] = TimingOutReader()
    put_req.headers['content-length'] = '6'
    with set_http_connect(201, 201, 201,
                          give_expect=captured_conns.append):
        resp = put_req.get_response(self.app)
    self.assertEqual(499, resp.status_int)
    # one connection per replica was opened, and all were closed
    self.assertEqual(self.replicas(), len(captured_conns))
    for conn in captured_conns:
        self.assertTrue(conn.closed)
def test_PUT_exception_during_transfer_data(self):
    """An unexpected exception reading the body yields 500."""
    class BrokenReader(object):
        def read(self, size):
            raise Exception('exception message')

    put_req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
                                 body='test body')
    put_req.environ['wsgi.input'] = BrokenReader()
    put_req.headers['content-length'] = '6'
    with set_http_connect(201, 201, 201):
        resp = put_req.get_response(self.app)
    self.assertEqual(500, resp.status_int)
def test_GET_simple(self):
    """A plain GET returns 200 with Accept-Ranges advertised and the
    backend's hop-by-hop Connection header stripped."""
    get_req = swift.common.swob.Request.blank('/v1/a/c/o')
    backend_headers = {'Connection': 'close'}
    with set_http_connect(200, headers=backend_headers):
        resp = get_req.get_response(self.app)
    self.assertEqual(200, resp.status_int)
    self.assertIn('Accept-Ranges', resp.headers)
    self.assertNotIn('Connection', resp.headers)
def test_GET_transfer_encoding_chunked(self):
    """A chunked backend response stays chunked toward the client."""
    get_req = swift.common.swob.Request.blank('/v1/a/c/o')
    backend_headers = {'transfer-encoding': 'chunked'}
    with set_http_connect(200, headers=backend_headers):
        resp = get_req.get_response(self.app)
    self.assertEqual(200, resp.status_int)
    self.assertEqual('chunked', resp.headers['Transfer-Encoding'])
def _test_removes_swift_bytes(self, method):
    """Issue *method* and check the internal swift_bytes suffix is
    stripped from the Content-Type returned to the client."""
    backend_headers = {'content-type': 'image/jpeg; swift_bytes=99'}
    req = swift.common.swob.Request.blank('/v1/a/c/o', method=method)
    with set_http_connect(200, headers=backend_headers):
        resp = req.get_response(self.app)
    self.assertEqual(200, resp.status_int)
    self.assertEqual('image/jpeg', resp.headers['Content-Type'])
def test_GET_removes_swift_bytes(self):
    """GET strips the swift_bytes content-type suffix."""
    self._test_removes_swift_bytes(method='GET')
def test_HEAD_removes_swift_bytes(self):
    """HEAD strips the swift_bytes content-type suffix."""
    self._test_removes_swift_bytes(method='HEAD')
def test_GET_error(self):
    """A backend 503 during GET is logged with the txn id before the
    proxy fails over to the 200 node."""
    get_req = swift.common.swob.Request.blank('/v1/a/c/o')
    self.app.logger.txn_id = \
        get_req.environ['swift.trans_id'] = 'my-txn-id'
    # the debug log formatter prints to stdout, so capture it
    captured_stdout = BytesIO()
    with set_http_connect(503, 200), \
            mock.patch('sys.stdout', captured_stdout):
        resp = get_req.get_response(self.app)
    self.assertEqual(200, resp.status_int)
    output = captured_stdout.getvalue()
    # every logged line must carry the transaction id
    for line in output.splitlines():
        self.assertIn('my-txn-id', line)
    self.assertIn('From Object Server', output)
def test_GET_handoff(self):
    """GET falls through all-primary 503s to a handoff node's 200."""
    get_req = swift.common.swob.Request.blank('/v1/a/c/o')
    statuses = [503] * self.obj_ring.replicas + [200]
    with set_http_connect(*statuses):
        resp = get_req.get_response(self.app)
    self.assertEqual(200, resp.status_int)
def test_GET_not_found(self):
    """GET returns 404 after every primary and handoff node 404s."""
    get_req = swift.common.swob.Request.blank('/v1/a/c/o')
    node_count = (self.obj_ring.replicas +
                  self.obj_ring.max_more_nodes)
    with set_http_connect(*([404] * node_count)):
        resp = get_req.get_response(self.app)
    self.assertEqual(404, resp.status_int)
def test_PUT_delete_at(self):
    """PUT with X-Delete-At forwards the expiry value plus the
    expirer-update headers to every object server."""
    t = str(int(time.time() + 100))
    req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
                             headers={'Content-Type': 'foo/bar',
                                      'X-Delete-At': t})
    put_headers = []

    def capture_headers(ip, port, device, part, method, path, headers,
                        **kwargs):
        # only backend PUTs are interesting here
        if method == 'PUT':
            put_headers.append(headers)
    codes = [201] * self.obj_ring.replicas
    with set_http_connect(*codes, give_connect=capture_headers):
        resp = req.get_response(self.app)
    self.assertEqual(resp.status_int, 201)
    for given_headers in put_headers:
        self.assertEqual(given_headers.get('X-Delete-At'), t)
        self.assertIn('X-Delete-At-Host', given_headers)
        self.assertIn('X-Delete-At-Device', given_headers)
        self.assertIn('X-Delete-At-Partition', given_headers)
        self.assertIn('X-Delete-At-Container', given_headers)
def test_PUT_converts_delete_after_to_delete_at(self):
    """X-Delete-After is converted to an absolute X-Delete-At."""
    req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
                             headers={'Content-Type': 'foo/bar',
                                      'X-Delete-After': '60'})
    put_headers = []

    def capture_headers(ip, port, device, part, method, path, headers,
                        **kwargs):
        # only backend PUTs are interesting here
        if method == 'PUT':
            put_headers.append(headers)
    codes = [201] * self.obj_ring.replicas
    # freeze time so now + 60 is deterministic
    t = time.time()
    with set_http_connect(*codes, give_connect=capture_headers):
        with mock.patch('time.time', lambda: t):
            resp = req.get_response(self.app)
    self.assertEqual(resp.status_int, 201)
    expected_delete_at = str(int(t) + 60)
    for given_headers in put_headers:
        self.assertEqual(given_headers.get('X-Delete-At'),
                         expected_delete_at)
        self.assertIn('X-Delete-At-Host', given_headers)
        self.assertIn('X-Delete-At-Device', given_headers)
        self.assertIn('X-Delete-At-Partition', given_headers)
        self.assertIn('X-Delete-At-Container', given_headers)
def test_container_sync_put_x_timestamp_not_found(self):
    """A sync PUT with an explicit X-Timestamp succeeds with 201 when no
    replica has the object yet."""
    for policy_index in [None] + [int(p) for p in POLICIES]:
        self.app.container_info['storage_policy'] = policy_index
        put_timestamp = utils.Timestamp(time.time()).normal
        sync_req = swob.Request.blank(
            '/v1/a/c/o', method='PUT', headers={
                'Content-Length': 0,
                'X-Timestamp': put_timestamp})
        statuses = [201] * self.obj_ring.replicas
        with set_http_connect(*statuses):
            resp = sync_req.get_response(self.app)
        self.assertEqual(resp.status_int, 201)
def test_container_sync_put_x_timestamp_match(self):
    """A sync PUT matching the on-disk timestamp exactly collapses the
    backend 409s into a 202 Accepted."""
    for policy_index in [None] + [int(p) for p in POLICIES]:
        self.app.container_info['storage_policy'] = policy_index
        put_timestamp = utils.Timestamp(time.time()).normal
        sync_req = swob.Request.blank(
            '/v1/a/c/o', method='PUT', headers={
                'Content-Length': 0,
                'X-Timestamp': put_timestamp})
        # every backend already has this exact timestamp on disk
        backend_ts = itertools.repeat(put_timestamp)
        statuses = [409] * self.obj_ring.replicas
        with set_http_connect(*statuses, timestamps=backend_ts):
            resp = sync_req.get_response(self.app)
        self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_older(self):
    """A sync PUT older than the backends' on-disk timestamps turns the
    409 conflicts into a 202 Accepted."""
    ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
    test_indexes = [None] + [int(p) for p in POLICIES]
    for policy_index in test_indexes:
        self.app.container_info['storage_policy'] = policy_index
        req = swob.Request.blank(
            '/v1/a/c/o', method='PUT', headers={
                'Content-Length': 0,
                'X-Timestamp': next(ts).internal})
        # backends report a strictly newer timestamp than the request's
        ts_iter = itertools.repeat(next(ts).internal)
        codes = [409] * self.obj_ring.replicas
        with set_http_connect(*codes, timestamps=ts_iter):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_newer(self):
    """A sync PUT newer than the on-disk timestamps succeeds with 201."""
    ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
    test_indexes = [None] + [int(p) for p in POLICIES]
    for policy_index in test_indexes:
        # on-disk timestamp is taken first, so the request's is newer
        orig_timestamp = next(ts).internal
        req = swob.Request.blank(
            '/v1/a/c/o', method='PUT', headers={
                'Content-Length': 0,
                'X-Timestamp': next(ts).internal})
        ts_iter = itertools.repeat(orig_timestamp)
        codes = [201] * self.obj_ring.replicas
        with set_http_connect(*codes, timestamps=ts_iter):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 201)
def test_put_x_timestamp_conflict(self):
    """One 409 (newer on-disk timestamp) plus 201s from the rest still
    yields a 202 Accepted."""
    ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
    req = swob.Request.blank(
        '/v1/a/c/o', method='PUT', headers={
            'Content-Length': 0,
            'X-Timestamp': next(ts).internal})
    # only the conflicting node reports a backend timestamp
    ts_iter = iter([next(ts).internal, None, None])
    codes = [409] + [201] * (self.obj_ring.replicas - 1)
    with set_http_connect(*codes, timestamps=ts_iter):
        resp = req.get_response(self.app)
    self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_missing_backend_timestamp(self):
    """All-409 responses with no backend timestamps still give 202."""
    ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
    req = swob.Request.blank(
        '/v1/a/c/o', method='PUT', headers={
            'Content-Length': 0,
            'X-Timestamp': next(ts).internal})
    # no node supplies an X-Backend-Timestamp with its 409
    ts_iter = iter([None, None, None])
    codes = [409] * self.obj_ring.replicas
    with set_http_connect(*codes, timestamps=ts_iter):
        resp = req.get_response(self.app)
    self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_other_weird_success_response(self):
    """A 409 mixed with 201s carrying unexpected bodies still gives 202."""
    ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
    req = swob.Request.blank(
        '/v1/a/c/o', method='PUT', headers={
            'Content-Length': 0,
            'X-Timestamp': next(ts).internal})
    ts_iter = iter([next(ts).internal, None, None])
    # the 201s carry a (status, body) tuple whose body should be ignored
    codes = [409] + [(201, 'notused')] * (self.obj_ring.replicas - 1)
    with set_http_connect(*codes, timestamps=ts_iter):
        resp = req.get_response(self.app)
    self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_if_none_match(self):
    """With If-None-Match: *, a timestamp conflict plus 412s resolves to
    412 rather than the 202 short-circuit."""
    ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
    req = swob.Request.blank(
        '/v1/a/c/o', method='PUT', headers={
            'Content-Length': 0,
            'If-None-Match': '*',
            'X-Timestamp': next(ts).internal})
    ts_iter = iter([next(ts).internal, None, None])
    codes = [409] + [(412, 'notused')] * (self.obj_ring.replicas - 1)
    with set_http_connect(*codes, timestamps=ts_iter):
        resp = req.get_response(self.app)
    self.assertEqual(resp.status_int, 412)
def test_container_sync_put_x_timestamp_race(self):
    """When an in-flight duplicate already landed the same timestamp on
    every node, the resulting 409s are reported as 202 Accepted."""
    ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
    test_indexes = [None] + [int(p) for p in POLICIES]
    for policy_index in test_indexes:
        put_timestamp = next(ts).internal
        req = swob.Request.blank(
            '/v1/a/c/o', method='PUT', headers={
                'Content-Length': 0,
                'X-Timestamp': put_timestamp})
        # object nodes they respond 409 because another in-flight request
        # finished and now the on disk timestamp is equal to the request.
        put_ts = [put_timestamp] * self.obj_ring.replicas
        codes = [409] * self.obj_ring.replicas
        ts_iter = iter(put_ts)
        with set_http_connect(*codes, timestamps=ts_iter):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_unsynced_race(self):
    """When only one node already has the same timestamp (409) and the
    rest accept (201), the request still resolves to 202 Accepted."""
    ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
    test_indexes = [None] + [int(p) for p in POLICIES]
    for policy_index in test_indexes:
        put_timestamp = next(ts).internal
        req = swob.Request.blank(
            '/v1/a/c/o', method='PUT', headers={
                'Content-Length': 0,
                'X-Timestamp': put_timestamp})
        # only one in-flight request finished
        put_ts = [None] * (self.obj_ring.replicas - 1)
        put_resp = [201] * (self.obj_ring.replicas - 1)
        put_ts += [put_timestamp]
        put_resp += [409]
        ts_iter = iter(put_ts)
        codes = put_resp
        with set_http_connect(*codes, timestamps=ts_iter):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 202)
@patch_policies(
    [StoragePolicy(0, '1-replica', True),
     StoragePolicy(1, '5-replica', False),
     StoragePolicy(2, '8-replica', False),
     StoragePolicy(3, '15-replica', False)],
    fake_ring_args=[
        {'replicas': 1}, {'replicas': 5}, {'replicas': 8}, {'replicas': 15}])
class TestReplicatedObjControllerVariousReplicas(BaseObjectControllerMixin,
                                                 unittest.TestCase):
    """Re-run the mixin's tests against rings of 1, 5, 8 and 15 replicas."""
    controller_cls = obj.ReplicatedObjectController
@contextmanager
def capture_http_requests(get_response):
    """Patch http_connect_raw so backend requests are recorded.

    *get_response* is a callable taking the captured request dict and
    returning the fake response.  Yields a ConnectionLog supporting
    len()/indexing/iteration over the FakeConn objects created.
    """
    class FakeConn(object):
        def __init__(self, req):
            self.req = req
            self.resp = None

        def getresponse(self):
            # lazily build the response from the supplied callable
            self.resp = get_response(self.req)
            return self.resp

    class ConnectionLog(object):
        def __init__(self):
            self.connections = []

        def __len__(self):
            return len(self.connections)

        def __getitem__(self, i):
            return self.connections[i]

        def __iter__(self):
            return iter(self.connections)

        def __call__(self, ip, port, method, path, headers, qs, ssl):
            # stands in for http_connect_raw: record the request and
            # hand back a FakeConn for the caller to use
            req = {
                'ip': ip,
                'port': port,
                'method': method,
                'path': path,
                'headers': headers,
                'qs': qs,
                'ssl': ssl,
            }
            conn = FakeConn(req)
            self.connections.append(conn)
            return conn

    fake_conn = ConnectionLog()
    with mock.patch('swift.common.bufferedhttp.http_connect_raw',
                    new=fake_conn):
        yield fake_conn
@patch_policies(with_ec_default=True)
class TestECObjController(BaseObjectControllerMixin, unittest.TestCase):
    # canned container info returned by the fake container lookup for
    # every request in these tests
    container_info = {
        'status': 200,
        'read_acl': None,
        'write_acl': None,
        'sync_key': None,
        'versions': None,
        'storage_policy': '0',
    }
    controller_cls = obj.ECObjectController
def _add_frag_index(self, index, headers):
# helper method to add a frag index header to an existing header dict
hdr_name = 'X-Object-Sysmeta-Ec-Frag-Index'
return dict(headers.items() + [(hdr_name, index)])
def test_determine_chunk_destinations(self):
class FakePutter(object):
def __init__(self, index):
self.node_index = index
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
# create a dummy list of putters, check no handoffs
putters = []
for index in range(self.policy.object_ring.replica_count):
putters.append(FakePutter(index))
got = controller._determine_chunk_destinations(putters, self.policy)
expected = {}
for i, p in enumerate(putters):
expected[p] = i
self.assertEqual(got, expected)
# now lets make a handoff at the end
orig_index = putters[-1].node_index = None
putters[-1].node_index = None
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(got, expected)
putters[-1].node_index = orig_index
# now lets make a handoff at the start
putters[0].node_index = None
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(got, expected)
putters[0].node_index = 0
# now lets make a handoff in the middle
putters[2].node_index = None
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(got, expected)
putters[2].node_index = 2
# now lets make all of them handoffs
for index in range(self.policy.object_ring.replica_count):
putters[index].node_index = None
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(sorted(got), sorted(expected))
def test_GET_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
get_statuses = [200] * self.policy.ec_ndata
get_hdrs = [{'Connection': 'close'}] * self.policy.ec_ndata
with set_http_connect(*get_statuses, headers=get_hdrs):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
self.assertNotIn('Connection', resp.headers)
    def _test_if_match(self, method):
        # Shared If-Match assertions for GET and HEAD.  A GET must collect
        # ec_ndata fragment responses to rebuild the body; HEAD needs one.
        num_responses = self.policy.ec_ndata if method == 'GET' else 1

        def _do_test(match_value, backend_status,
                     etag_is_at='X-Object-Sysmeta-Does-Not-Exist'):
            # Issue one request with the given If-Match value; backends
            # reply with a fragment etag plus EC and alternate sysmeta
            # etags.  X-Backend-Etag-Is-At selects which header the
            # backend matched against; the proxy must always surface the
            # EC data etag to the client.
            req = swift.common.swob.Request.blank(
                '/v1/a/c/o', method=method,
                headers={'If-Match': match_value,
                         'X-Backend-Etag-Is-At': etag_is_at})
            get_resp = [backend_status] * num_responses
            resp_headers = {'Etag': 'frag_etag',
                            'X-Object-Sysmeta-Ec-Etag': 'data_etag',
                            'X-Object-Sysmeta-Alternate-Etag': 'alt_etag'}
            with set_http_connect(*get_resp, headers=resp_headers):
                resp = req.get_response(self.app)
            self.assertEqual('data_etag', resp.headers['Etag'])
            return resp
        # wildcard
        resp = _do_test('*', 200)
        self.assertEqual(resp.status_int, 200)
        # match
        resp = _do_test('"data_etag"', 200)
        self.assertEqual(resp.status_int, 200)
        # no match
        resp = _do_test('"frag_etag"', 412)
        self.assertEqual(resp.status_int, 412)
        # match wildcard against an alternate etag
        resp = _do_test('*', 200,
                        etag_is_at='X-Object-Sysmeta-Alternate-Etag')
        self.assertEqual(resp.status_int, 200)
        # match against an alternate etag
        resp = _do_test('"alt_etag"', 200,
                        etag_is_at='X-Object-Sysmeta-Alternate-Etag')
        self.assertEqual(resp.status_int, 200)
        # no match against an alternate etag
        resp = _do_test('"data_etag"', 412,
                        etag_is_at='X-Object-Sysmeta-Alternate-Etag')
        self.assertEqual(resp.status_int, 412)
    def test_GET_if_match(self):
        # If-Match semantics for GET (shared assertions in _test_if_match)
        self._test_if_match('GET')
    def test_HEAD_if_match(self):
        # If-Match semantics for HEAD (shared assertions in _test_if_match)
        self._test_if_match('HEAD')
    def _test_if_none_match(self, method):
        # Shared If-None-Match assertions for GET and HEAD; mirror image
        # of _test_if_match (a match now yields 304 instead of 200).
        num_responses = self.policy.ec_ndata if method == 'GET' else 1

        def _do_test(match_value, backend_status,
                     etag_is_at='X-Object-Sysmeta-Does-Not-Exist'):
            # One request with the given If-None-Match value; the proxy
            # must always surface the EC data etag regardless of which
            # header X-Backend-Etag-Is-At pointed the backend at.
            req = swift.common.swob.Request.blank(
                '/v1/a/c/o', method=method,
                headers={'If-None-Match': match_value,
                         'X-Backend-Etag-Is-At': etag_is_at})
            get_resp = [backend_status] * num_responses
            resp_headers = {'Etag': 'frag_etag',
                            'X-Object-Sysmeta-Ec-Etag': 'data_etag',
                            'X-Object-Sysmeta-Alternate-Etag': 'alt_etag'}
            with set_http_connect(*get_resp, headers=resp_headers):
                resp = req.get_response(self.app)
            self.assertEqual('data_etag', resp.headers['Etag'])
            return resp
        # wildcard
        resp = _do_test('*', 304)
        self.assertEqual(resp.status_int, 304)
        # match
        resp = _do_test('"data_etag"', 304)
        self.assertEqual(resp.status_int, 304)
        # no match
        resp = _do_test('"frag_etag"', 200)
        self.assertEqual(resp.status_int, 200)
        # match wildcard against an alternate etag
        resp = _do_test('*', 304,
                        etag_is_at='X-Object-Sysmeta-Alternate-Etag')
        self.assertEqual(resp.status_int, 304)
        # match against an alternate etag
        resp = _do_test('"alt_etag"', 304,
                        etag_is_at='X-Object-Sysmeta-Alternate-Etag')
        self.assertEqual(resp.status_int, 304)
        # no match against an alternate etag
        resp = _do_test('"data_etag"', 200,
                        etag_is_at='X-Object-Sysmeta-Alternate-Etag')
        self.assertEqual(resp.status_int, 200)
    def test_GET_if_none_match(self):
        # If-None-Match semantics for GET (see _test_if_none_match)
        self._test_if_none_match('GET')
    def test_HEAD_if_none_match(self):
        # If-None-Match semantics for HEAD (see _test_if_none_match)
        self._test_if_none_match('HEAD')
def test_GET_simple_x_newest(self):
req = swift.common.swob.Request.blank('/v1/a/c/o',
headers={'X-Newest': 'true'})
codes = [200] * self.policy.ec_ndata
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
get_resp = [503] + [200] * self.policy.ec_ndata
with set_http_connect(*get_resp):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
    def test_GET_with_body(self):
        # GET of a real object body: EC-encode the body into per-node frag
        # archives, serve ec_ndata of them, and verify the proxy decodes
        # the response back to the original bytes.
        req = swift.common.swob.Request.blank('/v1/a/c/o')
        # turn a real body into fragments
        segment_size = self.policy.ec_segment_size
        real_body = ('asdf' * segment_size)[:-10]
        # split it up into chunks
        chunks = [real_body[x:x + segment_size]
                  for x in range(0, len(real_body), segment_size)]
        fragment_payloads = []
        for chunk in chunks:
            fragments = self.policy.pyeclib_driver.encode(chunk)
            if not fragments:
                break
            fragment_payloads.append(
                fragments * self.policy.ec_duplication_factor)
        # sanity
        sanity_body = ''
        for fragment_payload in fragment_payloads:
            sanity_body += self.policy.pyeclib_driver.decode(
                fragment_payload)
        self.assertEqual(len(real_body), len(sanity_body))
        self.assertEqual(real_body, sanity_body)
        # list(zip(...)) for py3 compatibility (zip is lazy there)
        node_fragments = list(zip(*fragment_payloads))
        self.assertEqual(len(node_fragments), self.replicas())  # sanity
        headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))}
        # each of the first ec_ndata nodes serves its whole frag archive
        responses = [(200, ''.join(node_fragments[i]), headers)
                     for i in range(POLICIES.default.ec_ndata)]
        status_codes, body_iter, headers = zip(*responses)
        with set_http_connect(*status_codes, body_iter=body_iter,
                              headers=headers):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(len(real_body), len(resp.body))
        self.assertEqual(real_body, resp.body)
def test_PUT_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
    def test_PUT_with_body_and_bad_etag(self):
        # A client-supplied Etag (header or footer) that doesn't match the
        # body must be rejected with 422 and all backend connections closed
        # without sending data.
        segment_size = self.policy.ec_segment_size
        test_body = ('asdf' * segment_size)[:-10]
        codes = [201] * self.replicas()
        expect_headers = {
            'X-Obj-Metadata-Footer': 'yes',
            'X-Obj-Multiphase-Commit': 'yes'
        }
        conns = []

        def capture_expect(conn):
            # stash the backend connection so we can verify that it is closed
            # (no data will be sent)
            conns.append(conn)
        # send a bad etag in the request headers
        headers = {'Etag': 'bad etag'}
        req = swift.common.swob.Request.blank(
            '/v1/a/c/o', method='PUT', headers=headers, body=test_body)
        with set_http_connect(*codes, expect_headers=expect_headers,
                              give_expect=capture_expect):
            resp = req.get_response(self.app)
        self.assertEqual(422, resp.status_int)
        self.assertEqual(self.replicas(), len(conns))
        for conn in conns:
            self.assertTrue(conn.closed)
        # make the footers callback send a bad Etag footer
        footers_callback = make_footers_callback('not the test body')
        env = {'swift.callback.update_footers': footers_callback}
        req = swift.common.swob.Request.blank(
            '/v1/a/c/o', method='PUT', environ=env, body=test_body)
        with set_http_connect(*codes, expect_headers=expect_headers):
            resp = req.get_response(self.app)
        self.assertEqual(422, resp.status_int)
def test_txn_id_logging_ECPUT(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
self.app.logger.txn_id = req.environ['swift.trans_id'] = 'test-txn-id'
codes = [(100, Timeout(), 503, 503)] * self.replicas()
stdout = BytesIO()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers), \
mock.patch('sys.stdout', stdout):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
for line in stdout.getvalue().splitlines():
self.assertIn('test-txn-id', line)
self.assertIn('Trying to get ',
stdout.getvalue())
def test_PUT_with_explicit_commit_status(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [(100, 100, 201)] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [503] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_PUT_mostly_success(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * self.quorum()
codes += [503] * (self.replicas() - len(codes))
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_error_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [(100, 503, Exception('not used'))] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_PUT_mostly_success_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * self.quorum()
codes += [(100, 503, Exception('not used'))] * (
self.replicas() - len(codes))
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_mostly_error_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [(100, 503, Exception('not used'))] * self.quorum()
if isinstance(self.policy, ECStoragePolicy):
codes *= self.policy.ec_duplication_factor
codes += [201] * (self.replicas() - len(codes))
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_PUT_commit_timeout(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.replicas() - 1)
codes.append((100, Timeout(), Exception('not used')))
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_commit_exception(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.replicas() - 1)
codes.append((100, Exception('kaboom!'), Exception('not used')))
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_ec_error_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise IOError('error message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_ec_chunkreadtimeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.ChunkReadTimeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
def test_PUT_ec_timeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.Timeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_ec_exception_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise Exception('exception message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
def test_PUT_with_body(self):
segment_size = self.policy.ec_segment_size
test_body = ('asdf' * segment_size)[:-10]
# make the footers callback not include Etag footer so that we can
# verify that the correct EC-calculated Etag is included in footers
# sent to backend
footers_callback = make_footers_callback()
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', environ=env)
etag = md5(test_body).hexdigest()
size = len(test_body)
req.body = test_body
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
resp_headers = {
'Some-Other-Header': 'Four',
'Etag': 'ignored',
}
put_requests = defaultdict(lambda: {'boundary': None, 'chunks': []})
def capture_body(conn, chunk):
put_requests[conn.connection_id]['chunks'].append(chunk)
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['boundary'] = headers[
'X-Backend-Obj-Multipart-Mime-Boundary']
put_requests[conn_id]['backend-content-length'] = headers[
'X-Backend-Obj-Content-Length']
put_requests[conn_id]['x-timestamp'] = headers[
'X-Timestamp']
with set_http_connect(*codes, expect_headers=expect_headers,
give_send=capture_body,
give_connect=capture_headers,
headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
timestamps = {captured_req['x-timestamp']
for captured_req in put_requests.values()}
self.assertEqual(1, len(timestamps), timestamps)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': '0',
'Last-Modified': time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(math.ceil(float(timestamps.pop())))),
'Etag': etag,
})
frag_archives = []
for connection_id, info in put_requests.items():
body = unchunk_body(''.join(info['chunks']))
self.assertIsNotNone(info['boundary'],
"didn't get boundary for conn %r" % (
connection_id,))
self.assertTrue(size > int(info['backend-content-length']) > 0,
"invalid backend-content-length for conn %r" % (
connection_id,))
# email.parser.FeedParser doesn't know how to take a multipart
# message and boundary together and parse it; it only knows how
# to take a string, parse the headers, and figure out the
# boundary on its own.
parser = email.parser.FeedParser()
parser.feed(
"Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" %
info['boundary'])
parser.feed(body)
message = parser.close()
self.assertTrue(message.is_multipart()) # sanity check
mime_parts = message.get_payload()
self.assertEqual(len(mime_parts), 3)
obj_part, footer_part, commit_part = mime_parts
# attach the body to frag_archives list
self.assertEqual(obj_part['X-Document'], 'object body')
frag_archives.append(obj_part.get_payload())
# assert length was correct for this connection
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[-1]))
# assert length was the same for all connections
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[0]))
# validate some footer metadata
self.assertEqual(footer_part['X-Document'], 'object metadata')
footer_metadata = json.loads(footer_part.get_payload())
self.assertTrue(footer_metadata)
expected = {}
# update expected with footers from the callback...
footers_callback(expected)
expected.update({
'X-Object-Sysmeta-Ec-Content-Length': str(size),
'X-Backend-Container-Update-Override-Size': str(size),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Container-Update-Override-Etag': etag,
'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size),
'Etag': md5(obj_part.get_payload()).hexdigest()})
for header, value in expected.items():
self.assertEqual(footer_metadata[header], value)
# sanity on commit message
self.assertEqual(commit_part['X-Document'], 'put commit')
self.assertEqual(len(frag_archives), self.replicas())
fragment_size = self.policy.fragment_size
node_payloads = []
for fa in frag_archives:
payload = [fa[x:x + fragment_size]
for x in range(0, len(fa), fragment_size)]
node_payloads.append(payload)
fragment_payloads = zip(*node_payloads)
expected_body = ''
for fragment_payload in fragment_payloads:
self.assertEqual(len(fragment_payload), self.replicas())
if True:
fragment_payload = list(fragment_payload)
expected_body += self.policy.pyeclib_driver.decode(
fragment_payload)
self.assertEqual(len(test_body), len(expected_body))
self.assertEqual(test_body, expected_body)
    def test_PUT_with_footers(self):
        # verify footers supplied by a footers callback being added to
        # trailing metadata
        segment_size = self.policy.ec_segment_size
        test_body = ('asdf' * segment_size)[:-10]
        etag = md5(test_body).hexdigest()
        size = len(test_body)
        codes = [201] * self.replicas()
        expect_headers = {
            'X-Obj-Metadata-Footer': 'yes',
            'X-Obj-Multiphase-Commit': 'yes'
        }
        resp_headers = {
            'Some-Other-Header': 'Four',
            'Etag': 'ignored',
        }

        def do_test(footers_to_add, expect_added):
            # run one PUT whose footers callback injects footers_to_add,
            # then assert the backend footer metadata is exactly the EC
            # footers plus expect_added and nothing else
            put_requests = defaultdict(
                lambda: {'boundary': None, 'chunks': []})

            def capture_body(conn, chunk):
                put_requests[conn.connection_id]['chunks'].append(chunk)

            def capture_headers(ip, port, device, part, method, path, headers,
                                **kwargs):
                conn_id = kwargs['connection_id']
                put_requests[conn_id]['boundary'] = headers[
                    'X-Backend-Obj-Multipart-Mime-Boundary']
                put_requests[conn_id]['x-timestamp'] = headers[
                    'X-Timestamp']

            def footers_callback(footers):
                footers.update(footers_to_add)
            env = {'swift.callback.update_footers': footers_callback}
            req = swift.common.swob.Request.blank(
                '/v1/a/c/o', method='PUT', environ=env, body=test_body)
            with set_http_connect(*codes, expect_headers=expect_headers,
                                  give_send=capture_body,
                                  give_connect=capture_headers,
                                  headers=resp_headers):
                resp = req.get_response(self.app)
            self.assertEqual(resp.status_int, 201)
            # all backend PUTs must share one X-Timestamp
            timestamps = {captured_req['x-timestamp']
                          for captured_req in put_requests.values()}
            self.assertEqual(1, len(timestamps), timestamps)
            self.assertEqual(dict(resp.headers), {
                'Content-Type': 'text/html; charset=UTF-8',
                'Content-Length': '0',
                'Last-Modified': time.strftime(
                    "%a, %d %b %Y %H:%M:%S GMT",
                    time.gmtime(math.ceil(float(timestamps.pop())))),
                'Etag': etag,
            })
            for connection_id, info in put_requests.items():
                body = unchunk_body(''.join(info['chunks']))
                # email.parser.FeedParser doesn't know how to take a multipart
                # message and boundary together and parse it; it only knows how
                # to take a string, parse the headers, and figure out the
                # boundary on its own.
                parser = email.parser.FeedParser()
                parser.feed(
                    "Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n"
                    % info['boundary'])
                parser.feed(body)
                message = parser.close()
                self.assertTrue(message.is_multipart())  # sanity check
                mime_parts = message.get_payload()
                self.assertEqual(len(mime_parts), 3)
                obj_part, footer_part, commit_part = mime_parts
                # validate EC footer metadata - should always be present
                self.assertEqual(footer_part['X-Document'], 'object metadata')
                footer_metadata = json.loads(footer_part.get_payload())
                self.assertIsNotNone(
                    footer_metadata.pop('X-Object-Sysmeta-Ec-Frag-Index'))
                expected = {
                    'X-Object-Sysmeta-Ec-Scheme':
                        self.policy.ec_scheme_description,
                    'X-Object-Sysmeta-Ec-Content-Length': str(size),
                    'X-Object-Sysmeta-Ec-Etag': etag,
                    'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size),
                    'Etag': md5(obj_part.get_payload()).hexdigest()}
                expected.update(expect_added)
                for header, value in expected.items():
                    self.assertIn(header, footer_metadata)
                    self.assertEqual(value, footer_metadata[header])
                    footer_metadata.pop(header)
                # nothing unexpected may remain in the footers
                self.assertFalse(footer_metadata)
        # sanity check - middleware sets no footer, expect EC overrides
        footers_to_add = {}
        expect_added = {
            'X-Backend-Container-Update-Override-Size': str(size),
            'X-Backend-Container-Update-Override-Etag': etag}
        do_test(footers_to_add, expect_added)
        # middleware cannot overwrite any EC sysmeta
        footers_to_add = {
            'X-Object-Sysmeta-Ec-Content-Length': str(size + 1),
            'X-Object-Sysmeta-Ec-Etag': 'other etag',
            'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size + 1),
            'X-Object-Sysmeta-Ec-Unused-But-Reserved': 'ignored'}
        do_test(footers_to_add, expect_added)
        # middleware can add x-object-sysmeta- headers including
        # x-object-sysmeta-container-update-override headers
        footers_to_add = {
            'X-Object-Sysmeta-Foo': 'bar',
            'X-Object-Sysmeta-Container-Update-Override-Size':
                str(size + 1),
            'X-Object-Sysmeta-Container-Update-Override-Etag': 'other etag',
            'X-Object-Sysmeta-Container-Update-Override-Ping': 'pong'
        }
        expect_added.update(footers_to_add)
        do_test(footers_to_add, expect_added)
        # middleware can also overwrite x-backend-container-update-override
        # headers
        override_footers = {
            'X-Backend-Container-Update-Override-Wham': 'bam',
            'X-Backend-Container-Update-Override-Size': str(size + 2),
            'X-Backend-Container-Update-Override-Etag': 'another etag'}
        footers_to_add.update(override_footers)
        expect_added.update(override_footers)
        do_test(footers_to_add, expect_added)
def test_PUT_old_obj_server(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
responses = [
# one server will response 100-continue but not include the
# needful expect headers and the connection will be dropped
((100, Exception('not used')), {}),
] + [
# and pleanty of successful responses too
(201, {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes',
}),
] * self.replicas()
random.shuffle(responses)
if responses[-1][0] != 201:
# whoops, stupid random
responses = responses[1:] + [responses[0]]
codes, expect_headers = zip(*responses)
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def _make_ec_archive_bodies(self, test_body, policy=None):
policy = policy or self.policy
return encode_frag_archive_bodies(policy, test_body)
def _make_ec_object_stub(self, test_body=None, policy=None,
timestamp=None):
policy = policy or self.policy
return make_ec_object_stub(test_body, policy, timestamp)
    def _fake_ec_node_response(self, node_frags):
        # build a fake GET responder from the given per-node frag layout
        return fake_ec_node_response(node_frags, self.policy)
def test_GET_with_frags_swapped_around(self):
segment_size = self.policy.ec_segment_size
test_data = ('test' * segment_size)[:-657]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
_part, primary_nodes = self.obj_ring.get_nodes('a', 'c', 'o')
node_key = lambda n: (n['ip'], n['port'])
backend_index = self.policy.get_backend_index
ts = self._ts_iter.next()
response_map = {
node_key(n): StubResponse(
200, ec_archive_bodies[backend_index(i)], {
'X-Object-Sysmeta-Ec-Content-Length': len(test_data),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Frag-Index': backend_index(i),
'X-Timestamp': ts.normal,
'X-Backend-Timestamp': ts.internal
}) for i, n in enumerate(primary_nodes)
}
# swap a parity response into a data node
data_node = random.choice(primary_nodes[:self.policy.ec_ndata])
parity_node = random.choice(
primary_nodes[
self.policy.ec_ndata:self.policy.ec_n_unique_fragments])
(response_map[node_key(data_node)],
response_map[node_key(parity_node)]) = \
(response_map[node_key(parity_node)],
response_map[node_key(data_node)])
def get_response(req):
req_key = (req['ip'], req['port'])
return response_map.pop(req_key)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(log), self.policy.ec_ndata)
self.assertEqual(len(response_map),
len(primary_nodes) - self.policy.ec_ndata)
def test_GET_with_no_success(self):
node_frags = [[]] * 28 # no frags on any node
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertEqual(len(log), 2 * self.replicas())
    def test_GET_with_only_handoffs(self):
        # all primaries 404 but the handoffs hold a full set of frags;
        # the GET must fall through to handoffs and still succeed
        obj1 = self._make_ec_object_stub()
        node_frags = [[]] * self.replicas()  # all primaries missing
        node_frags = node_frags + [  # handoffs
            {'obj': obj1, 'frag': 0},
            {'obj': obj1, 'frag': 1},
            {'obj': obj1, 'frag': 2},
            {'obj': obj1, 'frag': 3},
            {'obj': obj1, 'frag': 4},
            {'obj': obj1, 'frag': 5},
            {'obj': obj1, 'frag': 6},
            {'obj': obj1, 'frag': 7},
            {'obj': obj1, 'frag': 8},
            {'obj': obj1, 'frag': 9},
            {'obj': obj1, 'frag': 10},  # parity
            {'obj': obj1, 'frag': 11},  # parity
            {'obj': obj1, 'frag': 12},  # parity
            {'obj': obj1, 'frag': 13},  # parity
        ]
        fake_response = self._fake_ec_node_response(node_frags)
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj1['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
        collected_responses = defaultdict(list)
        for conn in log:
            etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
            index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
            collected_responses[etag].append(index)
        # GETS would be required to all primaries and then ndata handoffs
        self.assertEqual(len(log), self.replicas() + self.policy.ec_ndata)
        self.assertEqual(2, len(collected_responses))
        # 404s
        self.assertEqual(self.replicas(), len(collected_responses[None]))
        self.assertEqual(self.policy.ec_ndata,
                         len(collected_responses[obj1['etag']]))
    def test_GET_with_single_missed_overwrite_does_not_need_handoff(self):
        # one primary still holds a frag of the older overwritten object;
        # the newer object has enough frags on primaries that no handoff
        # request is ever needed
        obj1 = self._make_ec_object_stub()
        obj2 = self._make_ec_object_stub()
        node_frags = [
            {'obj': obj2, 'frag': 0},
            {'obj': obj2, 'frag': 1},
            {'obj': obj1, 'frag': 2},  # missed over write
            {'obj': obj2, 'frag': 3},
            {'obj': obj2, 'frag': 4},
            {'obj': obj2, 'frag': 5},
            {'obj': obj2, 'frag': 6},
            {'obj': obj2, 'frag': 7},
            {'obj': obj2, 'frag': 8},
            {'obj': obj2, 'frag': 9},
            {'obj': obj2, 'frag': 10},  # parity
            {'obj': obj2, 'frag': 11},  # parity
            {'obj': obj2, 'frag': 12},  # parity
            {'obj': obj2, 'frag': 13},  # parity
            # {'obj': obj2, 'frag': 2},  # handoff (not used in this test)
        ]
        fake_response = self._fake_ec_node_response(node_frags)
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj2['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
        collected_responses = defaultdict(set)
        for conn in log:
            etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
            index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
            collected_responses[etag].add(index)
        # because the primary nodes are shuffled, it's possible the proxy
        # didn't even notice the missed overwrite frag - but it might have
        self.assertLessEqual(len(log), self.policy.ec_ndata + 1)
        self.assertLessEqual(len(collected_responses), 2)
        # ... regardless we should never need to fetch more than ec_ndata
        # frags for any given etag
        for etag, frags in collected_responses.items():
            self.assertLessEqual(len(frags), self.policy.ec_ndata,
                                 'collected %s frags for etag %s' % (
                                     len(frags), etag))
    def test_GET_with_many_missed_overwrite_will_need_handoff(self):
        # five primaries still hold frags of the older object, so the
        # newer object can only be rebuilt with help from one handoff
        obj1 = self._make_ec_object_stub()
        obj2 = self._make_ec_object_stub()
        node_frags = [
            {'obj': obj2, 'frag': 0},
            {'obj': obj2, 'frag': 1},
            {'obj': obj1, 'frag': 2},  # missed
            {'obj': obj2, 'frag': 3},
            {'obj': obj2, 'frag': 4},
            {'obj': obj2, 'frag': 5},
            {'obj': obj1, 'frag': 6},  # missed
            {'obj': obj2, 'frag': 7},
            {'obj': obj2, 'frag': 8},
            {'obj': obj1, 'frag': 9},  # missed
            {'obj': obj1, 'frag': 10},  # missed
            {'obj': obj1, 'frag': 11},  # missed
            {'obj': obj2, 'frag': 12},
            {'obj': obj2, 'frag': 13},
            {'obj': obj2, 'frag': 6},  # handoff
        ]
        fake_response = self._fake_ec_node_response(node_frags)
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj2['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
        collected_responses = defaultdict(set)
        for conn in log:
            etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
            index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
            collected_responses[etag].add(index)
        # there's not enough of the obj2 etag on the primaries, we would
        # have collected responses for both etags, and would have made
        # one more request to the handoff node
        self.assertEqual(len(log), self.replicas() + 1)
        self.assertEqual(len(collected_responses), 2)
        # ... regardless we should never need to fetch more than ec_ndata
        # frags for any given etag
        for etag, frags in collected_responses.items():
            self.assertLessEqual(len(frags), self.policy.ec_ndata,
                                 'collected %s frags for etag %s' % (
                                     len(frags), etag))
    def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_succeed(self):
        """
        Scenario: frags of two objects alternate with 404s ([]) across
        primaries and handoffs; the proxy must search the entire node list
        before it finally completes ec_ndata frags of the newer obj2 (its
        10th frag is on the very last node).
        """
        obj1 = self._make_ec_object_stub(timestamp=self.ts())
        obj2 = self._make_ec_object_stub(timestamp=self.ts())
        node_frags = [
            {'obj': obj1, 'frag': 0},
            {'obj': obj2, 'frag': 0},
            [],
            {'obj': obj1, 'frag': 1},
            {'obj': obj2, 'frag': 1},
            [],
            {'obj': obj1, 'frag': 2},
            {'obj': obj2, 'frag': 2},
            [],
            {'obj': obj1, 'frag': 3},
            {'obj': obj2, 'frag': 3},
            [],
            {'obj': obj1, 'frag': 4},
            {'obj': obj2, 'frag': 4},
            [],
            {'obj': obj1, 'frag': 5},
            {'obj': obj2, 'frag': 5},
            [],
            {'obj': obj1, 'frag': 6},
            {'obj': obj2, 'frag': 6},
            [],
            {'obj': obj1, 'frag': 7},
            {'obj': obj2, 'frag': 7},
            [],
            {'obj': obj1, 'frag': 8},
            {'obj': obj2, 'frag': 8},
            [],
            {'obj': obj2, 'frag': 9},
        ]
        fake_response = self._fake_ec_node_response(node_frags)
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj2['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
        collected_responses = defaultdict(set)
        for conn in log:
            etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
            index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
            collected_responses[etag].add(index)
        # we go exactly as long as we have to, finding two different
        # etags and some 404's (i.e. collected_responses[None])
        self.assertEqual(len(log), len(node_frags))
        self.assertEqual(len(collected_responses), 3)
        # ... regardless we should never need to fetch more than ec_ndata
        # frags for any given etag
        for etag, frags in collected_responses.items():
            self.assertLessEqual(len(frags), self.policy.ec_ndata,
                                 'collected %s frags for etag %s' % (
                                     len(frags), etag))
    def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_stop(self):
        """
        Like the _but_succeed variant, but neither object ever reaches
        ec_ndata frags: the proxy exhausts the default node_iter
        (2 * replicas) and gives up with a 404.
        """
        obj1 = self._make_ec_object_stub()
        obj2 = self._make_ec_object_stub()
        node_frags = [
            {'obj': obj1, 'frag': 0},
            {'obj': obj2, 'frag': 0},
            [],
            {'obj': obj1, 'frag': 1},
            {'obj': obj2, 'frag': 1},
            [],
            {'obj': obj1, 'frag': 2},
            {'obj': obj2, 'frag': 2},
            [],
            {'obj': obj1, 'frag': 3},
            {'obj': obj2, 'frag': 3},
            [],
            {'obj': obj1, 'frag': 4},
            {'obj': obj2, 'frag': 4},
            [],
            {'obj': obj1, 'frag': 5},
            {'obj': obj2, 'frag': 5},
            [],
            {'obj': obj1, 'frag': 6},
            {'obj': obj2, 'frag': 6},
            [],
            {'obj': obj1, 'frag': 7},
            {'obj': obj2, 'frag': 7},
            [],
            {'obj': obj1, 'frag': 8},
            {'obj': obj2, 'frag': 8},
            [],
            # handoffs are iter'd in order so proxy will see 404 from this
            # final handoff
            [],
        ]
        fake_response = self._fake_ec_node_response(node_frags)
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 404)
        collected_responses = defaultdict(set)
        for conn in log:
            etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
            index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
            collected_responses[etag].add(index)
        # default node_iter will exhaust at 2 * replicas
        self.assertEqual(len(log), 2 * self.replicas())
        # two etags plus None (from the 404 responses)
        self.assertEqual(len(collected_responses), 3)
        # ... regardless we should never need to fetch more than ec_ndata
        # frags for any given etag
        for etag, frags in collected_responses.items():
            self.assertLessEqual(len(frags), self.policy.ec_ndata,
                                 'collected %s frags for etag %s' % (
                                     len(frags), etag))
    def test_GET_with_duplicate_but_sufficient_frag_indexes(self):
        """
        Primaries hold five duplicated frag indexes; the proxy must skip
        the duplicates and complete its set of ec_ndata unique indexes via
        one request to a handoff.
        """
        obj1 = self._make_ec_object_stub()
        # proxy should ignore duplicated frag indexes and continue search for
        # a set of unique indexes, finding last one on a handoff
        node_frags = [
            {'obj': obj1, 'frag': 0},
            {'obj': obj1, 'frag': 0},  # duplicate frag
            {'obj': obj1, 'frag': 1},
            {'obj': obj1, 'frag': 1},  # duplicate frag
            {'obj': obj1, 'frag': 2},
            {'obj': obj1, 'frag': 2},  # duplicate frag
            {'obj': obj1, 'frag': 3},
            {'obj': obj1, 'frag': 3},  # duplicate frag
            {'obj': obj1, 'frag': 4},
            {'obj': obj1, 'frag': 4},  # duplicate frag
            {'obj': obj1, 'frag': 10},
            {'obj': obj1, 'frag': 11},
            {'obj': obj1, 'frag': 12},
            {'obj': obj1, 'frag': 13},
            {'obj': obj1, 'frag': 5},  # handoff
        ]
        fake_response = self._fake_ec_node_response(node_frags)
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj1['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
        # expect a request to all primaries plus one handoff
        self.assertEqual(self.replicas() + 1, len(log))
        collected_indexes = defaultdict(list)
        for conn in log:
            fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
            if fi is not None:
                collected_indexes[fi].append(conn)
        # exactly ec_ndata distinct frag indexes were collected
        self.assertEqual(len(collected_indexes), self.policy.ec_ndata)
    def test_GET_with_duplicate_and_hidden_frag_indexes(self):
        """
        Primaries hold duplicated frag indexes and the final unique frag
        (5) is 'hidden' as the second frag on node 1; the proxy must skip
        duplicates and make an extra request back to node 1 to find it.
        """
        obj1 = self._make_ec_object_stub()
        # proxy should ignore duplicated frag indexes and continue search for
        # a set of unique indexes, finding the last one hidden behind the
        # duplicate on the first primary (NB: not on a handoff)
        node_frags = [
            [{'obj': obj1, 'frag': 0}, {'obj': obj1, 'frag': 5}],
            {'obj': obj1, 'frag': 0},  # duplicate frag
            {'obj': obj1, 'frag': 1},
            {'obj': obj1, 'frag': 1},  # duplicate frag
            {'obj': obj1, 'frag': 2},
            {'obj': obj1, 'frag': 2},  # duplicate frag
            {'obj': obj1, 'frag': 3},
            {'obj': obj1, 'frag': 3},  # duplicate frag
            {'obj': obj1, 'frag': 4},
            {'obj': obj1, 'frag': 4},  # duplicate frag
            {'obj': obj1, 'frag': 10},
            {'obj': obj1, 'frag': 11},
            {'obj': obj1, 'frag': 12},
            {'obj': obj1, 'frag': 13},
        ]
        fake_response = self._fake_ec_node_response(node_frags)
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj1['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
        # Expect a maximum of one request to each primary plus one extra
        # request to node 1. Actual value could be less if the extra request
        # occurs and quorum is reached before requests to nodes with a
        # duplicate frag.
        self.assertLessEqual(len(log), self.replicas() + 1)
        collected_indexes = defaultdict(list)
        for conn in log:
            fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
            if fi is not None:
                collected_indexes[fi].append(conn)
        self.assertEqual(len(collected_indexes), self.policy.ec_ndata)
    def test_GET_with_duplicate_but_insufficient_frag(self):
        """
        Only ec_ndata - 1 unique frag indexes exist (the rest are
        duplicates) and handoffs all 404, so the proxy can never complete
        a decodable set and must return 404 after trying every node.
        """
        obj1 = self._make_ec_object_stub()
        # proxy should ignore duplicated frag indexes and continue search for
        # a set of unique indexes, but fails to find one
        node_frags = [
            {'obj': obj1, 'frag': 0},
            {'obj': obj1, 'frag': 0},  # duplicate frag
            {'obj': obj1, 'frag': 1},
            {'obj': obj1, 'frag': 1},  # duplicate frag
            {'obj': obj1, 'frag': 2},
            {'obj': obj1, 'frag': 2},  # duplicate frag
            {'obj': obj1, 'frag': 3},
            {'obj': obj1, 'frag': 3},  # duplicate frag
            {'obj': obj1, 'frag': 4},
            {'obj': obj1, 'frag': 4},  # duplicate frag
            {'obj': obj1, 'frag': 10},
            {'obj': obj1, 'frag': 11},
            {'obj': obj1, 'frag': 12},
            {'obj': obj1, 'frag': 13},
        ] + [[]] * 14 # 404 from handoffs
        fake_response = self._fake_ec_node_response(node_frags)
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 404)
        # expect a request to all nodes
        self.assertEqual(2 * self.replicas(), len(log))
        collected_indexes = defaultdict(list)
        collected_etags = set()
        for conn in log:
            etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
            collected_etags.add(etag)  # will be None from handoffs
            fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
            if fi is not None:
                collected_indexes[fi].append(conn)
        # one short of a decodable set of unique indexes
        self.assertEqual(len(collected_indexes), self.policy.ec_ndata - 1)
        self.assertEqual({obj1['etag'], None}, collected_etags)
    def test_GET_with_missing_and_mixed_frags_may_503(self):
        """
        Primaries all 404 but handoffs hold 7 frags each of two objects:
        neither object can reach a quorum, and because the handoffs did
        respond with frags (no backend 404 consensus) the proxy 503s.
        """
        obj1 = self._make_ec_object_stub()
        obj2 = self._make_ec_object_stub()
        # we get a 503 when all the handoffs return 200
        node_frags = [[]] * self.replicas()  # primaries have no frags
        node_frags = node_frags + [  # handoffs all have frags
            {'obj': obj1, 'frag': 0},
            {'obj': obj2, 'frag': 0},
            {'obj': obj1, 'frag': 1},
            {'obj': obj2, 'frag': 1},
            {'obj': obj1, 'frag': 2},
            {'obj': obj2, 'frag': 2},
            {'obj': obj1, 'frag': 3},
            {'obj': obj2, 'frag': 3},
            {'obj': obj1, 'frag': 4},
            {'obj': obj2, 'frag': 4},
            {'obj': obj1, 'frag': 5},
            {'obj': obj2, 'frag': 5},
            {'obj': obj1, 'frag': 6},
            {'obj': obj2, 'frag': 6},
        ]
        fake_response = self._fake_ec_node_response(node_frags)
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 503)
        # never get a quorum so all nodes are searched
        self.assertEqual(len(log), 2 * self.replicas())
        collected_indexes = defaultdict(list)
        for conn in log:
            fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
            if fi is not None:
                collected_indexes[fi].append(conn)
        # 7 distinct indexes were seen (each shared by both objects)
        self.assertEqual(len(collected_indexes), 7)
    def test_GET_with_mixed_frags_and_no_quorum_will_503(self):
        """
        Every node has a frag but they are spread over four objects, none
        of which can reach a quorum; with no backend 404s at all the proxy
        responds 503 after exhausting the node iter.
        """
        # all nodes have a frag but there is no one set that reaches quorum,
        # which means there is no backend 404 response; with only 200s but
        # no quorate set the proxy must return 503
        obj1 = self._make_ec_object_stub()
        obj2 = self._make_ec_object_stub()
        obj3 = self._make_ec_object_stub()
        obj4 = self._make_ec_object_stub()
        node_frags = [
            {'obj': obj1, 'frag': 0},
            {'obj': obj2, 'frag': 0},
            {'obj': obj3, 'frag': 0},
            {'obj': obj1, 'frag': 1},
            {'obj': obj2, 'frag': 1},
            {'obj': obj3, 'frag': 1},
            {'obj': obj1, 'frag': 2},
            {'obj': obj2, 'frag': 2},
            {'obj': obj3, 'frag': 2},
            {'obj': obj1, 'frag': 3},
            {'obj': obj2, 'frag': 3},
            {'obj': obj3, 'frag': 3},
            {'obj': obj1, 'frag': 4},
            {'obj': obj2, 'frag': 4},
            {'obj': obj3, 'frag': 4},
            {'obj': obj1, 'frag': 5},
            {'obj': obj2, 'frag': 5},
            {'obj': obj3, 'frag': 5},
            {'obj': obj1, 'frag': 6},
            {'obj': obj2, 'frag': 6},
            {'obj': obj3, 'frag': 6},
            {'obj': obj1, 'frag': 7},
            {'obj': obj2, 'frag': 7},
            {'obj': obj3, 'frag': 7},
            {'obj': obj1, 'frag': 8},
            {'obj': obj2, 'frag': 8},
            {'obj': obj3, 'frag': 8},
            {'obj': obj4, 'frag': 8},
        ]
        fake_response = self._fake_ec_node_response(node_frags)
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 503)
        collected_etags = set()
        collected_status = set()
        for conn in log:
            etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
            collected_etags.add(etag)
            collected_status.add(conn.resp.status)
        # default node_iter will exhaust at 2 * replicas
        self.assertEqual(len(log), 2 * self.replicas())
        self.assertEqual(
            {obj1['etag'], obj2['etag'], obj3['etag'], obj4['etag']},
            collected_etags)
        # every backend responded 200 - no 404s at all
        self.assertEqual({200}, collected_status)
    def test_GET_with_quorum_durable_files(self):
        """
        With (ec_nparity + 1) durable frags on primaries and the rest
        non-durable, a GET completes with exactly ec_ndata requests.
        """
        # verify that only (ec_nparity + 1) nodes need to be durable for a GET
        # to be completed with ec_ndata requests.
        obj1 = self._make_ec_object_stub()
        node_frags = [
            {'obj': obj1, 'frag': 0, 'durable': True},  # durable
            {'obj': obj1, 'frag': 1, 'durable': True},  # durable
            {'obj': obj1, 'frag': 2, 'durable': True},  # durable
            {'obj': obj1, 'frag': 3, 'durable': True},  # durable
            {'obj': obj1, 'frag': 4, 'durable': True},  # durable
            {'obj': obj1, 'frag': 5, 'durable': False},
            {'obj': obj1, 'frag': 6, 'durable': False},
            {'obj': obj1, 'frag': 7, 'durable': False},
            {'obj': obj1, 'frag': 8, 'durable': False},
            {'obj': obj1, 'frag': 9, 'durable': False},
            {'obj': obj1, 'frag': 10, 'durable': False},  # parity
            {'obj': obj1, 'frag': 11, 'durable': False},  # parity
            {'obj': obj1, 'frag': 12, 'durable': False},  # parity
            {'obj': obj1, 'frag': 13, 'durable': False},  # parity
        ]  # handoffs not used in this scenario
        fake_response = self._fake_ec_node_response(list(node_frags))
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj1['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
        self.assertEqual(self.policy.ec_ndata, len(log))
        collected_durables = []
        for conn in log:
            # a durable response reports matching durable and data timestamps
            if (conn.resp.headers.get('X-Backend-Durable-Timestamp')
                    == conn.resp.headers.get('X-Backend-Data-Timestamp')):
                collected_durables.append(conn)
        # because nodes are shuffled we can't be sure how many durables are
        # returned but it must be at least 1 and cannot exceed 5
        self.assertLessEqual(len(collected_durables), 5)
        self.assertGreaterEqual(len(collected_durables), 1)
    def test_GET_with_single_durable_file(self):
        """
        A single durable frag among otherwise non-durable primaries is
        sufficient for a successful GET.
        """
        # verify that a single durable is sufficient for a GET
        # to be completed with ec_ndata requests.
        obj1 = self._make_ec_object_stub()
        node_frags = [
            {'obj': obj1, 'frag': 0, 'durable': True},  # durable
            {'obj': obj1, 'frag': 1, 'durable': False},
            {'obj': obj1, 'frag': 2, 'durable': False},
            {'obj': obj1, 'frag': 3, 'durable': False},
            {'obj': obj1, 'frag': 4, 'durable': False},
            {'obj': obj1, 'frag': 5, 'durable': False},
            {'obj': obj1, 'frag': 6, 'durable': False},
            {'obj': obj1, 'frag': 7, 'durable': False},
            {'obj': obj1, 'frag': 8, 'durable': False},
            {'obj': obj1, 'frag': 9, 'durable': False},
            {'obj': obj1, 'frag': 10, 'durable': False},  # parity
            {'obj': obj1, 'frag': 11, 'durable': False},  # parity
            {'obj': obj1, 'frag': 12, 'durable': False},  # parity
            {'obj': obj1, 'frag': 13, 'durable': False},  # parity
        ]  # handoffs not used in this scenario
        fake_response = self._fake_ec_node_response(list(node_frags))
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj1['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
        collected_durables = []
        for conn in log:
            # a durable response reports matching durable and data timestamps
            if (conn.resp.headers.get('X-Backend-Durable-Timestamp')
                    == conn.resp.headers.get('X-Backend-Data-Timestamp')):
                collected_durables.append(conn)
        # because nodes are shuffled we can't be sure how many non-durables
        # are returned before the durable, but we do expect a single durable
        self.assertEqual(1, len(collected_durables))
def test_GET_with_no_durable_files(self):
# verify that at least one durable is necessary for a successful GET
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
] + [[]] * self.replicas() # handoffs
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# all 28 nodes tried with an optimistic get, none are durable and none
# report having a durable timestamp
self.assertEqual(28, len(log))
    def test_GET_with_missing_durable_files_and_mixed_etags(self):
        """
        Two sub-scenarios verifying the proxy finds the object that has a
        durable frag and enough total frags, even when another object's
        frags (durable or not) are more numerous or encountered first.
        """
        obj1 = self._make_ec_object_stub()
        obj2 = self._make_ec_object_stub()
        # non-quorate durables for another object won't stop us finding the
        # quorate object
        node_frags = [
            # ec_ndata - 1 frags of obj2 are available and durable
            {'obj': obj2, 'frag': 0, 'durable': True},
            {'obj': obj2, 'frag': 1, 'durable': True},
            {'obj': obj2, 'frag': 2, 'durable': True},
            {'obj': obj2, 'frag': 3, 'durable': True},
            {'obj': obj2, 'frag': 4, 'durable': True},
            {'obj': obj2, 'frag': 5, 'durable': True},
            {'obj': obj2, 'frag': 6, 'durable': True},
            {'obj': obj2, 'frag': 7, 'durable': True},
            {'obj': obj2, 'frag': 8, 'durable': True},
            # ec_ndata frags of obj1 are available and one is durable
            {'obj': obj1, 'frag': 0, 'durable': False},
            {'obj': obj1, 'frag': 1, 'durable': False},
            {'obj': obj1, 'frag': 2, 'durable': False},
            {'obj': obj1, 'frag': 3, 'durable': False},
            {'obj': obj1, 'frag': 4, 'durable': False},
            {'obj': obj1, 'frag': 5, 'durable': False},
            {'obj': obj1, 'frag': 6, 'durable': False},
            {'obj': obj1, 'frag': 7, 'durable': False},
            {'obj': obj1, 'frag': 8, 'durable': False},
            {'obj': obj1, 'frag': 9, 'durable': True},
        ]
        fake_response = self._fake_ec_node_response(list(node_frags))
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response):
            resp = req.get_response(self.app)
        # obj1 has a durable frag and a full decodable set, so it wins
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj1['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
        # Quorum of non-durables for a different object won't
        # prevent us hunting down the durable object
        node_frags = [
            # primaries
            {'obj': obj2, 'frag': 0, 'durable': False},
            {'obj': obj2, 'frag': 1, 'durable': False},
            {'obj': obj2, 'frag': 2, 'durable': False},
            {'obj': obj2, 'frag': 3, 'durable': False},
            {'obj': obj2, 'frag': 4, 'durable': False},
            {'obj': obj2, 'frag': 5, 'durable': False},
            {'obj': obj2, 'frag': 6, 'durable': False},
            {'obj': obj2, 'frag': 7, 'durable': False},
            {'obj': obj2, 'frag': 8, 'durable': False},
            {'obj': obj2, 'frag': 9, 'durable': False},
            {'obj': obj2, 'frag': 10, 'durable': False},
            {'obj': obj2, 'frag': 11, 'durable': False},
            {'obj': obj2, 'frag': 12, 'durable': False},
            {'obj': obj2, 'frag': 13, 'durable': False},
            # handoffs
            {'obj': obj1, 'frag': 0, 'durable': False},
            {'obj': obj1, 'frag': 1, 'durable': False},
            {'obj': obj1, 'frag': 2, 'durable': False},
            {'obj': obj1, 'frag': 3, 'durable': False},
            {'obj': obj1, 'frag': 4, 'durable': False},
            {'obj': obj1, 'frag': 5, 'durable': False},
            {'obj': obj1, 'frag': 6, 'durable': False},
            {'obj': obj1, 'frag': 7, 'durable': False},
            {'obj': obj1, 'frag': 8, 'durable': False},
            {'obj': obj1, 'frag': 9, 'durable': False},
            {'obj': obj1, 'frag': 10, 'durable': False},  # parity
            {'obj': obj1, 'frag': 11, 'durable': False},  # parity
            {'obj': obj1, 'frag': 12, 'durable': False},  # parity
            {'obj': obj1, 'frag': 13, 'durable': True},  # parity
        ]
        fake_response = self._fake_ec_node_response(list(node_frags))
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response):
            resp = req.get_response(self.app)
        # obj1's single durable (on a handoff) makes it the winner again
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj1['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
    def test_GET_with_missing_durables_and_older_durables(self):
        """
        Newer non-durable frags obscure older durable frags; the proxy
        must fall back to fetching the older durable set once it knows one
        exists.
        """
        # scenario: non-durable frags of newer obj1 obscure all durable frags
        # of older obj2, so first 14 requests result in a non-durable set.
        # At that point (or before) the proxy knows that a durable set of
        # frags for obj2 exists so will fetch them, requiring another 10
        # directed requests.
        obj2 = self._make_ec_object_stub(timestamp=self._ts_iter.next())
        obj1 = self._make_ec_object_stub(timestamp=self._ts_iter.next())
        node_frags = [
            [{'obj': obj1, 'frag': 0, 'durable': False}],  # obj2 missing
            [{'obj': obj1, 'frag': 1, 'durable': False},
             {'obj': obj2, 'frag': 1, 'durable': True}],
            [{'obj': obj1, 'frag': 2, 'durable': False}],  # obj2 missing
            [{'obj': obj1, 'frag': 3, 'durable': False},
             {'obj': obj2, 'frag': 3, 'durable': True}],
            [{'obj': obj1, 'frag': 4, 'durable': False},
             {'obj': obj2, 'frag': 4, 'durable': True}],
            [{'obj': obj1, 'frag': 5, 'durable': False},
             {'obj': obj2, 'frag': 5, 'durable': True}],
            [{'obj': obj1, 'frag': 6, 'durable': False},
             {'obj': obj2, 'frag': 6, 'durable': True}],
            [{'obj': obj1, 'frag': 7, 'durable': False},
             {'obj': obj2, 'frag': 7, 'durable': True}],
            [{'obj': obj1, 'frag': 8, 'durable': False},
             {'obj': obj2, 'frag': 8, 'durable': True}],
            [{'obj': obj1, 'frag': 9, 'durable': False}],  # obj2 missing
            [{'obj': obj1, 'frag': 10, 'durable': False},
             {'obj': obj2, 'frag': 10, 'durable': True}],
            [{'obj': obj1, 'frag': 11, 'durable': False},
             {'obj': obj2, 'frag': 11, 'durable': True}],
            [{'obj': obj1, 'frag': 12, 'durable': False}],  # obj2 missing
            [{'obj': obj1, 'frag': 13, 'durable': False},
             {'obj': obj2, 'frag': 13, 'durable': True}],
        ]
        fake_response = self._fake_ec_node_response(list(node_frags))
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        # the older but durable obj2 is served
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj2['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
        # max: proxy will GET all non-durable obj1 frags and then 10 obj frags
        self.assertLessEqual(len(log), self.replicas() + self.policy.ec_ndata)
        # min: proxy will GET 10 non-durable obj1 frags and then 10 obj frags
        self.assertGreaterEqual(len(log), 2 * self.policy.ec_ndata)
        # scenario: obj3 has 14 frags but only 2 are durable and these are
        # obscured by two non-durable frags of obj1. There is also a single
        # non-durable frag of obj2. The proxy will need to do at least 10
        # GETs to see all the obj3 frags plus 1 more to GET a durable frag.
        # The proxy may also do one more GET if the obj2 frag is found.
        # i.e. 10 + 1 durable for obj3, 2 for obj1 and 1 more if obj2 found
        obj2 = self._make_ec_object_stub(timestamp=self._ts_iter.next())
        obj3 = self._make_ec_object_stub(timestamp=self._ts_iter.next())
        obj1 = self._make_ec_object_stub(timestamp=self._ts_iter.next())
        node_frags = [
            [{'obj': obj1, 'frag': 0, 'durable': False},  # obj1 frag
             {'obj': obj3, 'frag': 0, 'durable': True}],
            [{'obj': obj1, 'frag': 1, 'durable': False},  # obj1 frag
             {'obj': obj3, 'frag': 1, 'durable': True}],
            [{'obj': obj2, 'frag': 2, 'durable': False},  # obj2 frag
             {'obj': obj3, 'frag': 2, 'durable': False}],
            [{'obj': obj3, 'frag': 3, 'durable': False}],
            [{'obj': obj3, 'frag': 4, 'durable': False}],
            [{'obj': obj3, 'frag': 5, 'durable': False}],
            [{'obj': obj3, 'frag': 6, 'durable': False}],
            [{'obj': obj3, 'frag': 7, 'durable': False}],
            [{'obj': obj3, 'frag': 8, 'durable': False}],
            [{'obj': obj3, 'frag': 9, 'durable': False}],
            [{'obj': obj3, 'frag': 10, 'durable': False}],
            [{'obj': obj3, 'frag': 11, 'durable': False}],
            [{'obj': obj3, 'frag': 12, 'durable': False}],
            [{'obj': obj3, 'frag': 13, 'durable': False}],
        ]
        fake_response = self._fake_ec_node_response(list(node_frags))
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj3['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj3['etag'])
        # bounds derived from the narrative above
        self.assertGreaterEqual(len(log), self.policy.ec_ndata + 1)
        self.assertLessEqual(len(log), self.policy.ec_ndata + 4)
    def test_GET_with_missing_durables_and_older_non_durables(self):
        """
        Newer non-durable frags obscure an older object that itself has
        one non-durable frag; the older obj2 is still reconstructable and
        is served.
        """
        # scenario: non-durable frags of newer obj1 obscure all frags
        # of older obj2, so first 28 requests result in a non-durable set.
        # There are only 10 frags for obj2 and one is not durable.
        obj2 = self._make_ec_object_stub(timestamp=self._ts_iter.next())
        obj1 = self._make_ec_object_stub(timestamp=self._ts_iter.next())
        node_frags = [
            [{'obj': obj1, 'frag': 0, 'durable': False}],  # obj2 missing
            [{'obj': obj1, 'frag': 1, 'durable': False},
             {'obj': obj2, 'frag': 1, 'durable': False}],  # obj2 non-durable
            [{'obj': obj1, 'frag': 2, 'durable': False}],  # obj2 missing
            [{'obj': obj1, 'frag': 3, 'durable': False},
             {'obj': obj2, 'frag': 3, 'durable': True}],
            [{'obj': obj1, 'frag': 4, 'durable': False},
             {'obj': obj2, 'frag': 4, 'durable': True}],
            [{'obj': obj1, 'frag': 5, 'durable': False},
             {'obj': obj2, 'frag': 5, 'durable': True}],
            [{'obj': obj1, 'frag': 6, 'durable': False},
             {'obj': obj2, 'frag': 6, 'durable': True}],
            [{'obj': obj1, 'frag': 7, 'durable': False},
             {'obj': obj2, 'frag': 7, 'durable': True}],
            [{'obj': obj1, 'frag': 8, 'durable': False},
             {'obj': obj2, 'frag': 8, 'durable': True}],
            [{'obj': obj1, 'frag': 9, 'durable': False}],  # obj2 missing
            [{'obj': obj1, 'frag': 10, 'durable': False},
             {'obj': obj2, 'frag': 10, 'durable': True}],
            [{'obj': obj1, 'frag': 11, 'durable': False},
             {'obj': obj2, 'frag': 11, 'durable': True}],
            [{'obj': obj1, 'frag': 12, 'durable': False}],  # obj2 missing
            [{'obj': obj1, 'frag': 13, 'durable': False},
             {'obj': obj2, 'frag': 13, 'durable': True}],
            [],  # 1 empty primary
        ]
        fake_response = self._fake_ec_node_response(list(node_frags))
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj2['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
        # max: proxy will GET all non-durable obj1 frags and then 10 obj2 frags
        self.assertLessEqual(len(log), self.replicas() + self.policy.ec_ndata)
        # min: proxy will GET 10 non-durable obj1 frags and then 10 obj2 frags
        self.assertGreaterEqual(len(log), 2 * self.policy.ec_ndata)
    def test_GET_with_mixed_etags_at_same_timestamp(self):
        """
        Pathological case: frags of two different objects share one
        timestamp; the proxy cannot assemble a consistent set, logs ETag
        mismatches, and returns 404 without raising.
        """
        # this scenario should never occur but if there are somehow
        # fragments for different content at the same timestamp then the
        # object controller should handle it gracefully
        ts = self.ts()  # force equal timestamps for two objects
        obj1 = self._make_ec_object_stub(timestamp=ts, test_body='obj1')
        obj2 = self._make_ec_object_stub(timestamp=ts, test_body='obj2')
        self.assertNotEqual(obj1['etag'], obj2['etag'])  # sanity
        node_frags = [
            # 7 frags of obj2 are available and durable
            {'obj': obj2, 'frag': 0, 'durable': True},
            {'obj': obj2, 'frag': 1, 'durable': True},
            {'obj': obj2, 'frag': 2, 'durable': True},
            {'obj': obj2, 'frag': 3, 'durable': True},
            {'obj': obj2, 'frag': 4, 'durable': True},
            {'obj': obj2, 'frag': 5, 'durable': True},
            {'obj': obj2, 'frag': 6, 'durable': True},
            # 7 frags of obj1 are available and durable
            {'obj': obj1, 'frag': 7, 'durable': True},
            {'obj': obj1, 'frag': 8, 'durable': True},
            {'obj': obj1, 'frag': 9, 'durable': True},
            {'obj': obj1, 'frag': 10, 'durable': True},
            {'obj': obj1, 'frag': 11, 'durable': True},
            {'obj': obj1, 'frag': 12, 'durable': True},
            {'obj': obj1, 'frag': 13, 'durable': True},
        ] + [[]] * self.replicas()  # handoffs
        fake_response = self._fake_ec_node_response(list(node_frags))
        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
            # read body to provoke any EC decode errors
            self.assertFalse(resp.body)
        self.assertEqual(resp.status_int, 404)
        self.assertEqual(len(log), self.replicas() * 2)
        collected_etags = set()
        for conn in log:
            etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
            collected_etags.add(etag)  # will be None from handoffs
        self.assertEqual({obj1['etag'], obj2['etag'], None}, collected_etags)
        # one mismatch error is logged for each of the 7 minority frags
        log_lines = self.app.logger.get_lines_for_level('error')
        self.assertEqual(log_lines,
                         ['Problem with fragment response: ETag mismatch'] * 7)
    def test_GET_mixed_success_with_range(self):
        """
        A ranged GET that encounters 416s from some primaries must spawn
        extra requests to make up the 2xx shortfall, in two orderings of
        the 416 responses.
        """
        fragment_size = self.policy.fragment_size
        ec_stub = self._make_ec_object_stub()
        frag_archives = ec_stub['frags']
        frag_archive_size = len(ec_stub['frags'][0])
        headers = {
            'Content-Type': 'text/plain',
            'Content-Length': fragment_size,
            'Content-Range': 'bytes 0-%s/%s' % (fragment_size - 1,
                                                frag_archive_size),
            'X-Object-Sysmeta-Ec-Content-Length': len(ec_stub['body']),
            'X-Object-Sysmeta-Ec-Etag': ec_stub['etag'],
            'X-Timestamp': Timestamp(self._ts_iter.next()).normal,
        }
        responses = [
            StubResponse(206, frag_archives[0][:fragment_size], headers, 0),
            StubResponse(206, frag_archives[1][:fragment_size], headers, 1),
            StubResponse(206, frag_archives[2][:fragment_size], headers, 2),
            StubResponse(206, frag_archives[3][:fragment_size], headers, 3),
            StubResponse(206, frag_archives[4][:fragment_size], headers, 4),
            # data nodes with old frag
            StubResponse(416, frag_index=5),
            StubResponse(416, frag_index=6),
            StubResponse(206, frag_archives[7][:fragment_size], headers, 7),
            StubResponse(206, frag_archives[8][:fragment_size], headers, 8),
            StubResponse(206, frag_archives[9][:fragment_size], headers, 9),
            # hopefully we ask for two more
            StubResponse(206, frag_archives[10][:fragment_size], headers, 10),
            StubResponse(206, frag_archives[11][:fragment_size], headers, 11),
        ]
        # NB: closure reads 'responses' from the enclosing scope, so the
        # rebinding below feeds the second scenario through the same callable
        def get_response(req):
            return responses.pop(0) if responses else StubResponse(404)
        req = swob.Request.blank('/v1/a/c/o', headers={'Range': 'bytes=0-3'})
        with capture_http_requests(get_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 206)
        self.assertEqual(resp.body, 'test')
        # ec_ndata good responses plus the two 416s
        self.assertEqual(len(log), self.policy.ec_ndata + 2)
        # verify that even when last responses to be collected are 416's
        # the shortfall of 2xx responses still triggers extra spawned requests
        responses = [
            StubResponse(206, frag_archives[0][:fragment_size], headers, 0),
            StubResponse(206, frag_archives[1][:fragment_size], headers, 1),
            StubResponse(206, frag_archives[2][:fragment_size], headers, 2),
            StubResponse(206, frag_archives[3][:fragment_size], headers, 3),
            StubResponse(206, frag_archives[4][:fragment_size], headers, 4),
            StubResponse(206, frag_archives[7][:fragment_size], headers, 7),
            StubResponse(206, frag_archives[8][:fragment_size], headers, 8),
            StubResponse(206, frag_archives[9][:fragment_size], headers, 9),
            StubResponse(206, frag_archives[10][:fragment_size], headers, 10),
            # data nodes with old frag
            StubResponse(416, frag_index=5),
            # hopefully we ask for one more
            StubResponse(416, frag_index=6),
            # and hopefully we ask for another
            StubResponse(206, frag_archives[11][:fragment_size], headers, 11),
        ]
        req = swob.Request.blank('/v1/a/c/o', headers={'Range': 'bytes=0-3'})
        with capture_http_requests(get_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 206)
        self.assertEqual(resp.body, 'test')
        self.assertEqual(len(log), self.policy.ec_ndata + 2)
def test_GET_with_range_unsatisfiable_mixed_success(self):
responses = [
StubResponse(416, frag_index=0),
StubResponse(416, frag_index=1),
StubResponse(416, frag_index=2),
StubResponse(416, frag_index=3),
StubResponse(416, frag_index=4),
StubResponse(416, frag_index=5),
StubResponse(416, frag_index=6),
# sneak in bogus extra responses
StubResponse(404),
StubResponse(206, frag_index=8),
# and then just "enough" more 416's
StubResponse(416, frag_index=9),
StubResponse(416, frag_index=10),
StubResponse(416, frag_index=11),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=%s-' % 100000000000000})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 416)
# ec_ndata responses that must agree, plus the bogus extras
self.assertEqual(len(log), self.policy.ec_ndata + 2)
def test_GET_with_missing_and_range_unsatisifiable(self):
responses = [ # not quite ec_ndata frags on primaries
StubResponse(416, frag_index=0),
StubResponse(416, frag_index=1),
StubResponse(416, frag_index=2),
StubResponse(416, frag_index=3),
StubResponse(416, frag_index=4),
StubResponse(416, frag_index=5),
StubResponse(416, frag_index=6),
StubResponse(416, frag_index=7),
StubResponse(416, frag_index=8),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=%s-' % 100000000000000})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
# TODO: does 416 make sense without a quorum, or should this be a 404?
# a non-range GET of same object would return 404
self.assertEqual(resp.status_int, 416)
self.assertEqual(len(log), 2 * self.replicas())
def test_GET_with_success_and_507_will_503(self):
responses = [ # only 9 good nodes
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
]
def get_response(req):
# bad disk on all other nodes
return responses.pop(0) if responses else StubResponse(507)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
self.assertEqual(len(log), 2 * self.replicas())
def test_GET_with_success_and_404_will_404(self):
responses = [ # only 9 good nodes
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
]
def get_response(req):
# no frags on other nodes
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertEqual(len(log), 2 * self.replicas())
    def test_GET_mixed_ranged_responses_success(self):
        """A ranged GET decodes when newer frags displace stale ones.

        Half the primaries answer with frags of an overwritten (old) object;
        the proxy keeps probing (tolerating some 404s) until it has enough
        frags of the newer object and returns the first segment.
        """
        segment_size = self.policy.ec_segment_size
        frag_size = self.policy.fragment_size
        new_data = ('test' * segment_size)[:-492]
        new_etag = md5(new_data).hexdigest()
        new_archives = self._make_ec_archive_bodies(new_data)
        old_data = ('junk' * segment_size)[:-492]
        old_etag = md5(old_data).hexdigest()
        old_archives = self._make_ec_archive_bodies(old_data)
        frag_archive_size = len(new_archives[0])

        # here we deliberately omit X-Backend-Data-Timestamp to check that
        # proxy will tolerate responses from object server that have not been
        # upgraded to send that header
        # NOTE: old_headers is built first so its X-Backend-Timestamp draw
        # from self._ts_iter is older than new_headers'
        old_headers = {
            'Content-Type': 'text/plain',
            'Content-Length': frag_size,
            'Content-Range': 'bytes 0-%s/%s' % (frag_size - 1,
                                                frag_archive_size),
            'X-Object-Sysmeta-Ec-Content-Length': len(old_data),
            'X-Object-Sysmeta-Ec-Etag': old_etag,
            'X-Backend-Timestamp': Timestamp(self._ts_iter.next()).internal
        }
        new_headers = {
            'Content-Type': 'text/plain',
            'Content-Length': frag_size,
            'Content-Range': 'bytes 0-%s/%s' % (frag_size - 1,
                                                frag_archive_size),
            'X-Object-Sysmeta-Ec-Content-Length': len(new_data),
            'X-Object-Sysmeta-Ec-Etag': new_etag,
            'X-Backend-Timestamp': Timestamp(self._ts_iter.next()).internal
        }
        # 7 primaries with stale frags, 3 handoffs failed to get new frags
        responses = [
            StubResponse(206, old_archives[0][:frag_size], old_headers, 0),
            StubResponse(206, new_archives[1][:frag_size], new_headers, 1),
            StubResponse(206, old_archives[2][:frag_size], old_headers, 2),
            StubResponse(206, new_archives[3][:frag_size], new_headers, 3),
            StubResponse(206, old_archives[4][:frag_size], old_headers, 4),
            StubResponse(206, new_archives[5][:frag_size], new_headers, 5),
            StubResponse(206, old_archives[6][:frag_size], old_headers, 6),
            StubResponse(206, new_archives[7][:frag_size], new_headers, 7),
            StubResponse(206, old_archives[8][:frag_size], old_headers, 8),
            StubResponse(206, new_archives[9][:frag_size], new_headers, 9),
            StubResponse(206, old_archives[10][:frag_size], old_headers, 10),
            StubResponse(206, new_archives[11][:frag_size], new_headers, 11),
            StubResponse(206, old_archives[12][:frag_size], old_headers, 12),
            StubResponse(206, new_archives[13][:frag_size], new_headers, 13),
            StubResponse(206, new_archives[0][:frag_size], new_headers, 0),
            StubResponse(404),
            StubResponse(404),
            StubResponse(206, new_archives[6][:frag_size], new_headers, 6),
            StubResponse(404),
            StubResponse(206, new_archives[10][:frag_size], new_headers, 10),
            StubResponse(206, new_archives[12][:frag_size], new_headers, 12),
        ]

        def get_response(req):
            return responses.pop(0) if responses else StubResponse(404)

        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(get_response) as log:
            resp = req.get_response(self.app)

        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body, new_data[:segment_size])
        self.assertEqual(len(log), self.policy.ec_ndata + 10)
    def test_GET_mismatched_fragment_archives(self):
        """A frag archive with a lying etag must break decode and be logged.

        headers2 deliberately claims etag1, so mixing one responses2 entry
        into a responses1 set makes the proxy decode incompatible frags:
        reading the body must raise ECDriverError and log
        'Error decoding fragments' with exc_info.
        """
        segment_size = self.policy.ec_segment_size
        test_data1 = ('test' * segment_size)[:-333]
        # N.B. the object data *length* here is different
        test_data2 = ('blah1' * segment_size)[:-333]
        etag1 = md5(test_data1).hexdigest()
        etag2 = md5(test_data2).hexdigest()
        ec_archive_bodies1 = self._make_ec_archive_bodies(test_data1)
        ec_archive_bodies2 = self._make_ec_archive_bodies(test_data2)

        headers1 = {'X-Object-Sysmeta-Ec-Etag': etag1,
                    'X-Object-Sysmeta-Ec-Content-Length': '333'}
        # here we're going to *lie* and say the etag here matches
        headers2 = {'X-Object-Sysmeta-Ec-Etag': etag1,
                    'X-Object-Sysmeta-Ec-Content-Length': '333'}

        responses1 = [(200, body, self._add_frag_index(fi, headers1))
                      for fi, body in enumerate(ec_archive_bodies1)]
        responses2 = [(200, body, self._add_frag_index(fi, headers2))
                      for fi, body in enumerate(ec_archive_bodies2)]

        req = swob.Request.blank('/v1/a/c/o')

        # sanity check responses1
        responses = responses1[:self.policy.ec_ndata]
        status_codes, body_iter, headers = zip(*responses)
        with set_http_connect(*status_codes, body_iter=body_iter,
                              headers=headers):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(md5(resp.body).hexdigest(), etag1)

        # sanity check responses2
        responses = responses2[:self.policy.ec_ndata]
        status_codes, body_iter, headers = zip(*responses)
        with set_http_connect(*status_codes, body_iter=body_iter,
                              headers=headers):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(md5(resp.body).hexdigest(), etag2)

        # now mix the responses a bit
        mix_index = random.randint(0, self.policy.ec_ndata - 1)
        mixed_responses = responses1[:self.policy.ec_ndata]
        mixed_responses[mix_index] = responses2[mix_index]

        status_codes, body_iter, headers = zip(*mixed_responses)
        with set_http_connect(*status_codes, body_iter=body_iter,
                              headers=headers):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        try:
            # the 200 was already committed; the decode error only surfaces
            # when the app iter is consumed
            resp.body
        except ECDriverError:
            resp._app_iter.close()
        else:
            self.fail('invalid ec fragment response body did not blow up!')
        error_lines = self.logger.get_lines_for_level('error')
        self.assertEqual(1, len(error_lines))
        msg = error_lines[0]
        self.assertIn('Error decoding fragments', msg)
        self.assertIn('/a/c/o', msg)
        log_msg_args, log_msg_kwargs = self.logger.log_dict['error'][0]
        self.assertEqual(log_msg_kwargs['exc_info'][0], ECDriverError)
    def test_GET_read_timeout(self):
        """If every node dribbles slower than the timeout the body is bad.

        recoverable_node_timeout (0.01s) is far below SlowBody's 0.1s delay,
        so resume attempts also time out; the GET still returns 200 (headers
        were already sent) but the body cannot match the etag, and every
        replica logs either a retry or a ChunkReadTimeout.
        """
        segment_size = self.policy.ec_segment_size
        test_data = ('test' * segment_size)[:-333]
        etag = md5(test_data).hexdigest()
        ec_archive_bodies = self._make_ec_archive_bodies(test_data)
        headers = {'X-Object-Sysmeta-Ec-Etag': etag}
        self.app.recoverable_node_timeout = 0.01
        responses = [
            (200, SlowBody(body, 0.1), self._add_frag_index(i, headers))
            for i, body in enumerate(ec_archive_bodies)
        ] * self.policy.ec_duplication_factor

        req = swob.Request.blank('/v1/a/c/o')

        # pad with 404s so handoff probes beyond the primaries are covered
        status_codes, body_iter, headers = zip(*responses + [
            (404, '', {}) for i in range(
                self.policy.object_ring.max_more_nodes)])
        with set_http_connect(*status_codes, body_iter=body_iter,
                              headers=headers):
            resp = req.get_response(self.app)
            self.assertEqual(resp.status_int, 200)
            # do this inside the fake http context manager, it'll try to
            # resume but won't be able to give us all the right bytes
            self.assertNotEqual(md5(resp.body).hexdigest(), etag)
        error_lines = self.logger.get_lines_for_level('error')
        self.assertEqual(self.replicas(), len(error_lines))
        nparity = self.policy.ec_nparity
        for line in error_lines[:nparity]:
            self.assertIn('retrying', line)
        for line in error_lines[nparity:]:
            self.assertIn('ChunkReadTimeout (0.01s)', line)
def test_GET_read_timeout_resume(self):
segment_size = self.policy.ec_segment_size
test_data = ('test' * segment_size)[:-333]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
headers = {'X-Object-Sysmeta-Ec-Etag': etag}
self.app.recoverable_node_timeout = 0.05
# first one is slow
responses = [(200, SlowBody(ec_archive_bodies[0], 0.1),
self._add_frag_index(0, headers))]
# ... the rest are fine
responses += [(200, body, self._add_frag_index(i, headers))
for i, body in enumerate(ec_archive_bodies[1:], start=1)]
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(
*responses[:self.policy.ec_ndata + 1])
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertTrue(md5(resp.body).hexdigest(), etag)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
self.assertIn('retrying', error_lines[0])
    def test_fix_response_HEAD(self):
        """HEAD responses report the object's logical EC metadata.

        A 200 HEAD must surface X-Object-Sysmeta-Ec-Content-Length/-Etag as
        Content-Length/Etag; a 404 reports the actual (empty) body size.
        """
        headers = {'X-Object-Sysmeta-Ec-Content-Length': '10',
                   'X-Object-Sysmeta-Ec-Etag': 'foo'}

        # successful HEAD
        responses = [(200, '', headers)]
        status_codes, body_iter, headers = zip(*responses)
        req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
        with set_http_connect(*status_codes, body_iter=body_iter,
                              headers=headers):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body, '')
        # 200OK shows original object content length
        self.assertEqual(resp.headers['Content-Length'], '10')
        self.assertEqual(resp.headers['Etag'], 'foo')

        # not found HEAD
        responses = [(404, '', {})] * self.replicas() * 2
        status_codes, body_iter, headers = zip(*responses)
        req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
        with set_http_connect(*status_codes, body_iter=body_iter,
                              headers=headers):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 404)
        # 404 shows actual response body size (i.e. 0 for HEAD)
        self.assertEqual(resp.headers['Content-Length'], '0')
    def test_PUT_with_slow_commits(self):
        """A PUT returns once a quorum of commits land, not waiting for all.

        A quorum's worth of fast 201s is scattered among 5-second-slow
        commits; with post_quorum_timeout cranked down, the response must
        come back well before the slow commits would have completed.
        """
        # It's important that this timeout be much less than the delay in
        # the slow commit responses so that the slow commits are not waited
        # for.
        self.app.post_quorum_timeout = 0.01

        req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
                                              body='')
        # plenty of slow commits
        response_sleep = 5.0
        codes = [FakeStatus(201, response_sleep=response_sleep)
                 for i in range(self.replicas())]
        # swap out some with regular fast responses
        number_of_fast_responses_needed_to_be_quick_enough = \
            self.policy.quorum
        fast_indexes = random.sample(
            range(self.replicas()),
            number_of_fast_responses_needed_to_be_quick_enough)
        for i in fast_indexes:
            codes[i] = 201

        expect_headers = {
            'X-Obj-Metadata-Footer': 'yes',
            'X-Obj-Multiphase-Commit': 'yes'
        }
        with set_http_connect(*codes, expect_headers=expect_headers):
            start = time.time()
            resp = req.get_response(self.app)
            response_time = time.time() - start
        self.assertEqual(resp.status_int, 201)
        # must not have waited for the slow commits
        self.assertLess(response_time, response_sleep)
def test_PUT_with_just_enough_durable_responses(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.policy.ec_ndata + 1)
codes += [503] * (self.policy.ec_nparity - 1)
self.assertEqual(len(codes), self.policy.ec_n_unique_fragments)
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_with_less_durable_responses(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.policy.ec_ndata)
codes += [503] * (self.policy.ec_nparity)
self.assertEqual(len(codes), self.policy.ec_n_unique_fragments)
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_GET_with_invalid_ranges(self):
# real body size is segment_size - 10 (just 1 segment)
segment_size = self.policy.ec_segment_size
real_body = ('a' * segment_size)[:-10]
# range is out of real body but in segment size
self._test_invalid_ranges('GET', real_body,
segment_size, '%s-' % (segment_size - 10))
# range is out of both real body and segment size
self._test_invalid_ranges('GET', real_body,
segment_size, '%s-' % (segment_size + 10))
    def _test_invalid_ranges(self, method, real_body, segment_size, req_range):
        """Drive a ranged request whose start exceeds the real body size.

        :param method: HTTP verb for the request
        :param real_body: the full object body the frags encode
        :param segment_size: EC segment size for the policy
        :param req_range: range-spec string without 'bytes=', e.g. '1014-'

        Backends answer 416 when the start is beyond the segment, or 200
        with frag data when it is within the segment but beyond the body;
        either way the proxy must produce a well-formed 416 carrying the
        standard error body, the object etag and Accept-Ranges.
        """
        # make a request with range starts from more than real size.
        body_etag = md5(real_body).hexdigest()
        req = swift.common.swob.Request.blank(
            '/v1/a/c/o', method=method,
            headers={'Destination': 'c1/o',
                     'Range': 'bytes=%s' % (req_range)})

        fragments = self.policy.pyeclib_driver.encode(real_body)
        fragment_payloads = [fragments * self.policy.ec_duplication_factor]

        node_fragments = zip(*fragment_payloads)
        self.assertEqual(len(node_fragments), self.replicas())  # sanity
        headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body)),
                   'X-Object-Sysmeta-Ec-Etag': body_etag}
        start = int(req_range.split('-')[0])
        self.assertGreaterEqual(start, 0)  # sanity
        title, exp = swob.RESPONSE_REASONS[416]
        range_not_satisfiable_body = \
            '<html><h1>%s</h1><p>%s</p></html>' % (title, exp)
        if start >= segment_size:
            responses = [(416, range_not_satisfiable_body,
                          self._add_frag_index(i, headers))
                         for i in range(POLICIES.default.ec_ndata)]
        else:
            responses = [(200, ''.join(node_fragments[i]),
                          self._add_frag_index(i, headers))
                         for i in range(POLICIES.default.ec_ndata)]
        status_codes, body_iter, headers = zip(*responses)
        expect_headers = {
            'X-Obj-Metadata-Footer': 'yes',
            'X-Obj-Multiphase-Commit': 'yes'
        }
        with set_http_connect(*status_codes, body_iter=body_iter,
                              headers=headers, expect_headers=expect_headers):
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 416)
        self.assertEqual(resp.content_length, len(range_not_satisfiable_body))
        self.assertEqual(resp.body, range_not_satisfiable_body)
        self.assertEqual(resp.etag, body_etag)
        self.assertEqual(resp.headers['Accept-Ranges'], 'bytes')
class TestECFunctions(unittest.TestCase):
    """Tests for module-level EC helpers in the proxy obj controller."""

    def test_chunk_transformer(self):
        """chunk_transformer buffers whole segments and emits frag sets."""
        def do_test(dup_factor, segments):
            # encode `segments` full segments under a policy with the given
            # duplication factor and check the transformer yields one
            # concatenated frag string per unique fragment index
            segment_size = 1024
            orig_chunks = []
            for i in range(segments):
                orig_chunks.append(chr(i + 97) * segment_size)
            policy = ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
                                     ec_ndata=8, ec_nparity=2,
                                     object_ring=FakeRing(
                                         replicas=10 * dup_factor),
                                     ec_segment_size=segment_size,
                                     ec_duplication_factor=dup_factor)
            encoded_chunks = [[] for _ in range(policy.ec_n_unique_fragments)]
            for orig_chunk in orig_chunks:
                # each segment produces a set of frags
                frag_set = policy.pyeclib_driver.encode(orig_chunk)
                for frag_index, frag_data in enumerate(frag_set):
                    encoded_chunks[frag_index].append(frag_data)
            # chunk_transformer buffers and concatenates multiple frags
            expected = [''.join(frags) for frags in encoded_chunks]

            transform = obj.chunk_transformer(policy)
            transform.send(None)  # prime the generator
            backend_chunks = transform.send(''.join(orig_chunks))
            self.assertIsNotNone(backend_chunks)  # sanity
            self.assertEqual(
                len(backend_chunks), policy.ec_n_unique_fragments)
            self.assertEqual(expected, backend_chunks)

            # flush out last chunk buffer
            backend_chunks = transform.send('')
            self.assertEqual(
                len(backend_chunks), policy.ec_n_unique_fragments)
            self.assertEqual([''] * policy.ec_n_unique_fragments,
                             backend_chunks)

        do_test(dup_factor=1, segments=1)
        do_test(dup_factor=2, segments=1)
        do_test(dup_factor=3, segments=1)
        do_test(dup_factor=1, segments=2)
        do_test(dup_factor=2, segments=2)
        do_test(dup_factor=3, segments=2)

    def test_chunk_transformer_non_aligned_last_chunk(self):
        """A final partial segment is encoded when the generator is flushed."""
        last_chunk = 'a' * 128

        def do_test(dup):
            policy = ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
                                     ec_ndata=8, ec_nparity=2,
                                     object_ring=FakeRing(replicas=10 * dup),
                                     ec_segment_size=1024,
                                     ec_duplication_factor=dup)
            expected = policy.pyeclib_driver.encode(last_chunk)
            transform = obj.chunk_transformer(policy)
            transform.send(None)  # prime the generator

            transform.send(last_chunk)
            # flush out last chunk buffer
            backend_chunks = transform.send('')

            self.assertEqual(
                len(backend_chunks), policy.ec_n_unique_fragments)
            self.assertEqual(expected, backend_chunks)

        do_test(1)
        do_test(2)
@patch_policies([ECStoragePolicy(0, name='ec', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10,
ec_nparity=4, ec_segment_size=4096,
ec_duplication_factor=2),
StoragePolicy(1, name='unu')],
fake_ring_args=[{'replicas': 28}, {}])
class TestECDuplicationObjController(
BaseObjectControllerMixin, unittest.TestCase):
container_info = {
'status': 200,
'read_acl': None,
'write_acl': None,
'sync_key': None,
'versions': None,
'storage_policy': '0',
}
controller_cls = obj.ECObjectController
def _make_ec_object_stub(self, test_body=None, policy=None,
timestamp=None):
policy = policy or self.policy
return make_ec_object_stub(test_body, policy, timestamp)
def _make_ec_archive_bodies(self, test_body, policy=None):
policy = policy or self.policy
return encode_frag_archive_bodies(policy, test_body)
    def _fake_ec_node_response(self, node_frags):
        # build a canned per-node backend responder for this test's policy
        return fake_ec_node_response(node_frags, self.policy)
    def _test_GET_with_duplication_factor(self, node_frags, obj):
        """Common assertions for a GET against healthy duplicated backends.

        :param node_frags: per-node frag stubs fed to the fake backends
        :param obj: the stub object whose etag/body the GET must return
        """
        # This is basic tests in the healthy backends status
        fake_response = self._fake_ec_node_response(node_frags)

        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)

        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj['etag'])

        collected_responses = defaultdict(set)
        for conn in log:
            etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
            index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
            collected_responses[etag].add(index)

        # the backend requests should be >= num_data_fragments
        self.assertGreaterEqual(len(log), self.policy.ec_ndata)
        # but <= # of replicas
        self.assertLessEqual(len(log), self.replicas())
        self.assertEqual(len(collected_responses), 1)

        etag, frags = collected_responses.items()[0]
        # the backend requests will stop at enough ec_ndata responses
        self.assertEqual(
            len(frags), self.policy.ec_ndata,
            'collected %s frags for etag %s' % (len(frags), etag))
    # TODO: in the following tests the "frag" values in node_frags actually
    # mean "node_index". Reconsider the name and semantics, or simply map
    # them so they really are frag indexes.
def test_GET_with_duplication_factor(self):
obj = self._make_ec_object_stub()
node_frags = [
{'obj': obj, 'frag': 0},
{'obj': obj, 'frag': 1},
{'obj': obj, 'frag': 2},
{'obj': obj, 'frag': 3},
{'obj': obj, 'frag': 4},
{'obj': obj, 'frag': 5},
{'obj': obj, 'frag': 6},
{'obj': obj, 'frag': 7},
{'obj': obj, 'frag': 8},
{'obj': obj, 'frag': 9},
{'obj': obj, 'frag': 10},
{'obj': obj, 'frag': 11},
{'obj': obj, 'frag': 12},
{'obj': obj, 'frag': 13},
] * 2 # duplicated!
self._test_GET_with_duplication_factor(node_frags, obj)
def test_GET_with_duplication_factor_almost_duplicate_dispersion(self):
obj = self._make_ec_object_stub()
node_frags = [
# first half of # of replicas are 0, 1, 2, 3, 4, 5, 6
{'obj': obj, 'frag': 0},
{'obj': obj, 'frag': 0},
{'obj': obj, 'frag': 1},
{'obj': obj, 'frag': 1},
{'obj': obj, 'frag': 2},
{'obj': obj, 'frag': 2},
{'obj': obj, 'frag': 3},
{'obj': obj, 'frag': 3},
{'obj': obj, 'frag': 4},
{'obj': obj, 'frag': 4},
{'obj': obj, 'frag': 5},
{'obj': obj, 'frag': 5},
{'obj': obj, 'frag': 6},
{'obj': obj, 'frag': 6},
# second half of # of replicas are 7, 8, 9, 10, 11, 12, 13
{'obj': obj, 'frag': 7},
{'obj': obj, 'frag': 7},
{'obj': obj, 'frag': 8},
{'obj': obj, 'frag': 8},
{'obj': obj, 'frag': 9},
{'obj': obj, 'frag': 9},
{'obj': obj, 'frag': 10},
{'obj': obj, 'frag': 10},
{'obj': obj, 'frag': 11},
{'obj': obj, 'frag': 11},
{'obj': obj, 'frag': 12},
{'obj': obj, 'frag': 12},
{'obj': obj, 'frag': 13},
{'obj': obj, 'frag': 13},
]
# ...but it still works!
self._test_GET_with_duplication_factor(node_frags, obj)
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_stop(self):
obj1 = self._make_ec_object_stub()
obj2 = self._make_ec_object_stub()
# both of obj1 and obj2 has only 9 frags which is not able to decode
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
]
# ... and the rests are 404s which is limited by request_count
# (2 * replicas in default) rather than max_extra_requests limitation
# because the retries will be in ResumingGetter if the responses
# are 404s
node_frags += [[]] * (self.replicas() * 2 - len(node_frags))
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# default node_iter will exhaust to the last of handoffs
self.assertEqual(len(log), self.replicas() * 2)
# we have obj1, obj2, and 404 NotFound in collected_responses
self.assertEqual(len(collected_responses), 3)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_duplicate_but_insufficient_frag(self):
obj1 = self._make_ec_object_stub()
# proxy should ignore duplicated frag indexes and continue search for
# a set of unique indexes, but fails to find one
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj1, 'frag': 0}, # duplicate frag
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 1}, # duplicate frag
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 2}, # duplicate frag
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 3}, # duplicate frag
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 4}, # duplicate frag
{'obj': obj1, 'frag': 10},
{'obj': obj1, 'frag': 11},
{'obj': obj1, 'frag': 12},
{'obj': obj1, 'frag': 13},
]
# ... and the rests are 404s which is limited by request_count
# (2 * replicas in default) rather than max_extra_requests limitation
# because the retries will be in ResumingGetter if the responses
# are 404s
node_frags += [[]] * (self.replicas() * 2 - len(node_frags))
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# expect a request to all nodes
self.assertEqual(2 * self.replicas(), len(log))
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), self.policy.ec_ndata - 1)
    def test_GET_with_many_missed_overwrite_will_need_handoff(self):
        """Primaries short on the newest frags are topped up from a handoff.

        obj2 overwrote obj1 but 5 primary indexes (in each duplicate set)
        still hold stale obj1 frags; the 10th unique obj2 frag only exists
        on a handoff, so the GET needs replicas + 1 backend requests.
        """
        obj1 = self._make_ec_object_stub()
        obj2 = self._make_ec_object_stub()
        # primaries
        node_frags = [
            {'obj': obj2, 'frag': 0},
            {'obj': obj2, 'frag': 1},
            {'obj': obj1, 'frag': 2},  # missed
            {'obj': obj2, 'frag': 3},
            {'obj': obj2, 'frag': 4},
            {'obj': obj2, 'frag': 5},
            {'obj': obj1, 'frag': 6},  # missed
            {'obj': obj2, 'frag': 7},
            {'obj': obj2, 'frag': 8},
            {'obj': obj1, 'frag': 9},  # missed
            {'obj': obj1, 'frag': 10},  # missed
            {'obj': obj1, 'frag': 11},  # missed
            {'obj': obj2, 'frag': 12},
            {'obj': obj2, 'frag': 13},
        ]

        node_frags = node_frags * 2  # 2 duplication

        # so the primaries have indexes 0, 1, 3, 4, 5, 7, 8, 12, 13
        # (9 indexes) for obj2 and then a handoff has index 6
        node_frags += [
            {'obj': obj2, 'frag': 6},  # handoff
        ]
        fake_response = self._fake_ec_node_response(node_frags)

        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)

        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['etag'], obj2['etag'])
        self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])

        collected_responses = defaultdict(set)
        for conn in log:
            etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
            index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
            collected_responses[etag].add(index)

        # there's not enough of the obj2 etag on the primaries, we would
        # have collected responses for both etags, and would have made
        # one more request to the handoff node
        self.assertEqual(len(log), self.replicas() + 1)
        self.assertEqual(len(collected_responses), 2)

        # ... regardless we should never need to fetch more than ec_ndata
        # frags for any given etag
        for etag, frags in collected_responses.items():
            self.assertLessEqual(len(frags), self.policy.ec_ndata,
                                 'collected %s frags for etag %s' % (
                                     len(frags), etag))
def test_GET_with_duplicate_but_sufficient_frag_indexes(self):
obj1 = self._make_ec_object_stub()
# proxy should ignore duplicated frag indexes and continue search for
# a set of unique indexes, finding last one on a handoff
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj1, 'frag': 0}, # duplicate frag
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 1}, # duplicate frag
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 2}, # duplicate frag
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 3}, # duplicate frag
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 4}, # duplicate frag
{'obj': obj1, 'frag': 10},
{'obj': obj1, 'frag': 11},
{'obj': obj1, 'frag': 12},
{'obj': obj1, 'frag': 13},
]
# proxy will access randomly to a node in the second set
# so to ensure the GET fragment meets what it needs.
node_frags += [{'obj': obj1, 'frag': 5}]
# rests are 404s
node_frags += [[]] * (self.replicas() - len(node_frags))
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
# expect a request to all primaries plus one handoff
self.assertGreaterEqual(
len(log), self.policy.ec_n_unique_fragments + 1)
self.assertLessEqual(len(log), self.replicas())
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), self.policy.ec_ndata)
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_succeed(self):
obj1 = self._make_ec_object_stub(timestamp=self.ts())
obj2 = self._make_ec_object_stub(timestamp=self.ts())
# 28 nodes are here
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
[],
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
[],
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
[],
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
[],
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
[],
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
[],
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
[],
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
[],
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
[],
[],
]
node_frags += [[]] * 13 # Plus 13 nodes in handoff
# finally 10th fragment for obj2 found
node_frags += [[{'obj': obj2, 'frag': 9}]]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# we go exactly as long as we have to, finding two different
# etags and some 404's (i.e. collected_responses[None])
self.assertEqual(len(log), len(node_frags))
self.assertEqual(len(collected_responses), 3)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
    def test_GET_with_mixed_frags_and_no_quorum_will_503(self):
        """With 7 etags and at most 8 frags each, no set can decode: 503."""
        # all nodes have a frag but there is no one set that reaches quorum,
        # which also means there is no backend 404 response; with nothing
        # decodable and nothing NotFound the proxy gives up with a 503
        # (note: test name and assertion below expect 503)
        obj1 = self._make_ec_object_stub()
        obj2 = self._make_ec_object_stub()
        obj3 = self._make_ec_object_stub()
        obj4 = self._make_ec_object_stub()
        obj5 = self._make_ec_object_stub()
        obj6 = self._make_ec_object_stub()
        obj7 = self._make_ec_object_stub()

        # primaries and handoffs for required nodes
        # this is 10-4 * 2 case so that 56 requests (2 * replicas) required
        # to give up. we prepare 7 different objects above so responses
        # will have 8 fragments for each object
        required_nodes = self.replicas() * 2
        # fill them out to the primary and handoff nodes
        node_frags = []
        for frag in range(8):
            for stub_obj in (obj1, obj2, obj3, obj4, obj5, obj6, obj7):
                if len(node_frags) >= required_nodes:
                    # we already have enough responses
                    break
                node_frags.append({'obj': stub_obj, 'frag': frag})

        # sanity
        self.assertEqual(required_nodes, len(node_frags))

        fake_response = self._fake_ec_node_response(node_frags)

        req = swob.Request.blank('/v1/a/c/o')
        with capture_http_requests(fake_response) as log:
            resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 503)

        collected_etags = set()
        collected_status = set()
        for conn in log:
            etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
            collected_etags.add(etag)
            collected_status.add(conn.resp.status)

        self.assertEqual(required_nodes, len(log))
        self.assertEqual(len(collected_etags), 7)
        self.assertEqual({200}, collected_status)
def test_GET_with_no_durable_files(self):
# verify that at least one durable is necessary for a successful GET
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
]
node_frags = node_frags * 2 # 2 duplications
node_frags += [[]] * self.replicas() # handoffs
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# all 28 nodes tried with an optimistic get, none are durable and none
# report having a durable timestamp
self.assertEqual(self.replicas() * 2, len(log))
def test_GET_with_missing_and_mixed_frags_may_503(self):
obj1 = self._make_ec_object_stub()
obj2 = self._make_ec_object_stub()
obj3 = self._make_ec_object_stub()
obj4 = self._make_ec_object_stub()
# we get a 503 when all the handoffs return 200
node_frags = [[]] * self.replicas() # primaries have no frags
# plus, 4 different objects and 7 indexes will b 28 node responses
# here for handoffs
node_frags = node_frags + [ # handoffs all have frags
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj3, 'frag': 0},
{'obj': obj4, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj3, 'frag': 1},
{'obj': obj4, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj3, 'frag': 2},
{'obj': obj4, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj3, 'frag': 3},
{'obj': obj4, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj3, 'frag': 4},
{'obj': obj4, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj3, 'frag': 5},
{'obj': obj4, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
{'obj': obj3, 'frag': 6},
{'obj': obj4, 'frag': 6},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
# never get a quorum so all nodes are searched
self.assertEqual(len(log), 2 * self.replicas())
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), 7)
def test_GET_with_mixed_etags_at_same_timestamp(self):
    """Two objects with equal timestamps but different etags must 404.

    Neither object can assemble ec_ndata (8) fragments from its 7
    available frags, and the etag mismatch is logged for each unusable
    response.
    """
    # the difference from parent class is only handoff stub length
    ts = self.ts()  # force equal timestamps for two objects
    obj1 = self._make_ec_object_stub(timestamp=ts, test_body='obj1')
    obj2 = self._make_ec_object_stub(timestamp=ts, test_body='obj2')
    self.assertNotEqual(obj1['etag'], obj2['etag'])  # sanity
    node_frags = [
        # 7 frags of obj2 are available and durable
        {'obj': obj2, 'frag': 0, 'durable': True},
        {'obj': obj2, 'frag': 1, 'durable': True},
        {'obj': obj2, 'frag': 2, 'durable': True},
        {'obj': obj2, 'frag': 3, 'durable': True},
        {'obj': obj2, 'frag': 4, 'durable': True},
        {'obj': obj2, 'frag': 5, 'durable': True},
        {'obj': obj2, 'frag': 6, 'durable': True},
        # 7 frags of obj1 are available and durable
        {'obj': obj1, 'frag': 7, 'durable': True},
        {'obj': obj1, 'frag': 8, 'durable': True},
        {'obj': obj1, 'frag': 9, 'durable': True},
        {'obj': obj1, 'frag': 10, 'durable': True},
        {'obj': obj1, 'frag': 11, 'durable': True},
        {'obj': obj1, 'frag': 12, 'durable': True},
        {'obj': obj1, 'frag': 13, 'durable': True},
        # handoffs
    ]
    # pad the remaining (primary + handoff) slots with empty responses
    node_frags += [[]] * (self.replicas() * 2 - len(node_frags))
    fake_response = self._fake_ec_node_response(list(node_frags))
    req = swob.Request.blank('/v1/a/c/o')
    with capture_http_requests(fake_response) as log:
        resp = req.get_response(self.app)
    # read body to provoke any EC decode errors
    self.assertFalse(resp.body)
    self.assertEqual(resp.status_int, 404)
    self.assertEqual(len(log), self.replicas() * 2)
    collected_etags = set()
    for conn in log:
        etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
        collected_etags.add(etag)  # will be None from handoffs
    self.assertEqual({obj1['etag'], obj2['etag'], None}, collected_etags)
    # one mismatch error per obj1 fragment rejected against obj2's etag
    log_lines = self.app.logger.get_lines_for_level('error')
    self.assertEqual(log_lines,
                     ['Problem with fragment response: ETag mismatch'] * 7)
def _test_determine_chunk_destinations_prioritize(
        self, missing_two, missing_one):
    # This scenario is only likely for ec_duplication_factor >= 2. If we
    # have multiple failures such that the putters collection is missing
    # two primary nodes for frag index 'missing_two' and missing one
    # primary node for frag index 'missing_one', then we should prioritize
    # finding a handoff for frag index 'missing_two'.

    # Minimal putter stand-in: _determine_chunk_destinations only reads
    # node_index, so nothing else needs to be faked.
    class FakePutter(object):
        def __init__(self, index):
            self.node_index = index

    controller = self.controller_cls(self.app, 'a', 'c', 'o')

    # sanity, caller must set missing_two < than ec_num_unique_fragments
    self.assertLess(missing_two, self.policy.ec_n_unique_fragments)
    # create a dummy list of putters, check no handoffs
    putters = []
    for index in range(self.policy.object_ring.replica_count):
        putters.append(FakePutter(index))
    # sanity - all putters have primary nodes
    got = controller._determine_chunk_destinations(putters, self.policy)
    expected = {}
    for i, p in enumerate(putters):
        expected[p] = self.policy.get_backend_index(i)
    self.assertEqual(got, expected)

    # now, for fragment index that is missing two copies, lets make one
    # putter be a handoff
    handoff_putter = putters[missing_two]
    handoff_putter.node_index = None
    # and then pop another putter for a copy of same fragment index
    # (the duplicate copy lives ec_n_unique_fragments positions later)
    putters.pop(missing_two + self.policy.ec_n_unique_fragments)
    # also pop one copy of a different fragment to make one missing hole
    putters.pop(missing_one)

    # then determine chunk destinations: we have 26 putters here;
    # missing_two frag index is missing two copies; missing_one frag index
    # is missing one copy, therefore the handoff node should be assigned to
    # missing_two frag index
    got = controller._determine_chunk_destinations(putters, self.policy)
    # N.B. len(putters) is now len(expected - 2) due to pop twice
    self.assertEqual(len(putters), len(got))
    # sanity, no node index - for handoff putter
    self.assertIsNone(handoff_putter.node_index)
    self.assertEqual(got[handoff_putter], missing_two)
    # sanity, other nodes except handoff_putter have node_index
    self.assertTrue(all(
        [putter.node_index is not None for putter in got if
         putter != handoff_putter]))
def test_determine_chunk_destinations_prioritize_more_missing(self):
    """The doubly-missing frag index wins the handoff in either order."""
    # (missing_two, missing_one) pairs: drop node_index 0, 14 and 1,
    # then the mirror case dropping 1, 15 and 0.
    for missing_two, missing_one in ((0, 1), (1, 0)):
        self._test_determine_chunk_destinations_prioritize(
            missing_two, missing_one)
# Allow this test module to be executed directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| redbo/swift | test/unit/proxy/controllers/test_obj.py | Python | apache-2.0 | 193,924 |
#
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019, 2020, 2021, 2022
# Vladimír Vondruš <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
| mosra/m.css | documentation/test/__init__.py | Python | mit | 1,229 |
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.powermax import utils
LOG = logging.getLogger(__name__)
# Backend state string for write-disabled volumes/links; referenced by
# other modules of this driver (not used within this file).
WRITE_DISABLED = "Write Disabled"
# Seconds between SnapVX unlink retries, and the retry cap
# (15 s * 30 tries = up to 7.5 minutes of waiting).
UNLINK_INTERVAL = 15
UNLINK_RETRIES = 30
class PowerMaxProvision(object):
    """Provisioning Class for Dell EMC PowerMax volume drivers.

    It supports VMAX 3, All Flash and PowerMax arrays.
    """

    def __init__(self, rest):
        # :param rest: the REST client used for all array operations
        self.utils = utils.PowerMaxUtils()
        self.rest = rest

    def create_storage_group(
            self, array, storagegroup_name, srp, slo, workload,
            extra_specs, do_disable_compression=False):
        """Create a new storage group.

        :param array: the array serial number
        :param storagegroup_name: the group name (String)
        :param srp: the SRP (String)
        :param slo: the SLO (String)
        :param workload: the workload (String)
        :param extra_specs: additional info
        :param do_disable_compression: disable compression flag
        :returns: storagegroup - storage group object
        """
        start_time = time.time()

        # Serialize per (storage group, array) so concurrent requests
        # cannot race to create the same group.
        @coordination.synchronized("emc-sg-{storage_group}-{array}")
        def do_create_storage_group(storage_group, array):
            # Check if storage group has been recently created
            # (i.e. by another request that held the lock before us).
            storagegroup = self.rest.get_storage_group(
                array, storagegroup_name)
            if storagegroup is None:
                storagegroup = self.rest.create_storage_group(
                    array, storage_group, srp, slo, workload, extra_specs,
                    do_disable_compression)

                LOG.debug("Create storage group took: %(delta)s H:MM:SS.",
                          {'delta': self.utils.get_time_delta(start_time,
                                                              time.time())})
                LOG.info("Storage group %(sg)s created successfully.",
                         {'sg': storagegroup_name})
            else:
                LOG.info("Storage group %(sg)s already exists.",
                         {'sg': storagegroup_name})
            return storagegroup

        return do_create_storage_group(storagegroup_name, array)

    def create_volume_from_sg(self, array, volume_name, storagegroup_name,
                              volume_size, extra_specs, rep_info=None):
        """Create a new volume in the given storage group.

        :param array: the array serial number
        :param volume_name: the volume name -- string
        :param storagegroup_name: the storage group name
        :param volume_size: volume size -- string
        :param extra_specs: extra specifications
        :param rep_info: replication session info dict -- optional
        :returns: volume info -- dict
        """
        @coordination.synchronized("emc-sg-{storage_group}-{array}")
        def do_create_volume_from_sg(storage_group, array):
            start_time = time.time()
            # Snapshot the group's device list first so replication code
            # can later identify which device this call added.
            if rep_info and rep_info.get('initial_device_list', False):
                local_device_list = self.rest.get_volume_list(
                    extra_specs['array'],
                    {'storageGroupId': storagegroup_name})
                rep_info['initial_device_list'] = local_device_list

            volume_dict = self.rest.create_volume_from_sg(
                array, volume_name, storage_group,
                volume_size, extra_specs, rep_info)

            LOG.debug("Create volume from storage group "
                      "took: %(delta)s H:MM:SS.",
                      {'delta': self.utils.get_time_delta(start_time,
                                                          time.time())})
            return volume_dict
        return do_create_volume_from_sg(storagegroup_name, array)

    def delete_volume_from_srp(self, array, device_id, volume_name):
        """Delete a volume from the srp.

        :param array: the array serial number
        :param device_id: the volume device id
        :param volume_name: the volume name
        """
        start_time = time.time()
        LOG.debug("Delete volume %(volume_name)s from srp.",
                  {'volume_name': volume_name})
        self.rest.delete_volume(array, device_id)
        LOG.debug("Delete volume took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(
                      start_time, time.time())})

    def create_volume_snapvx(self, array, source_device_id,
                             snap_name, extra_specs, ttl=0):
        """Create a snapVx of a volume.

        :param array: the array serial number
        :param source_device_id: source volume device id
        :param snap_name: the snapshot name
        :param extra_specs: the extra specifications
        :param ttl: time to live in hours, defaults to 0
        """
        # Serialize all SnapVX operations on the same source device.
        @coordination.synchronized("emc-snapvx-{src_device_id}")
        def do_create_volume_snap(src_device_id):
            start_time = time.time()
            LOG.debug("Create Snap Vx snapshot of: %(source)s.",
                      {'source': src_device_id})

            self.rest.create_volume_snap(
                array, snap_name, src_device_id, extra_specs, ttl)
            LOG.debug("Create volume snapVx took: %(delta)s H:MM:SS.",
                      {'delta': self.utils.get_time_delta(start_time,
                                                          time.time())})
        do_create_volume_snap(source_device_id)

    def create_volume_replica(
            self, array, source_device_id, target_device_id,
            snap_name, extra_specs, create_snap=False, copy_mode=False):
        """Create a snap vx of a source and copy to a target.

        :param array: the array serial number
        :param source_device_id: source volume device id
        :param target_device_id: target volume device id
        :param snap_name: the name for the snap shot
        :param extra_specs: extra specifications
        :param create_snap: Flag for create snapvx
        :param copy_mode: If copy mode should be used for SnapVX target links
        """
        start_time = time.time()
        if create_snap:
            # We are creating a temporary snapshot. Specify a ttl of 1 hour
            self.create_volume_snapvx(array, source_device_id,
                                      snap_name, extra_specs, ttl=1)

        # Link source to target
        @coordination.synchronized("emc-snapvx-{src_device_id}")
        def do_modify_volume_snap(src_device_id):
            self.rest.modify_volume_snap(
                array, src_device_id, target_device_id, snap_name,
                extra_specs, link=True, copy=copy_mode)

        do_modify_volume_snap(source_device_id)
        LOG.debug("Create element replica took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(start_time,
                                                      time.time())})

    def unlink_snapvx_tgt_volume(
            self, array, target_device_id, source_device_id, snap_name,
            extra_specs, snap_id, loop=True):
        """Unlink a snapshot from its target volume.

        :param array: the array serial number
        :param source_device_id: source volume device id
        :param target_device_id: target volume device id
        :param snap_name: the name for the snap shot
        :param extra_specs: extra specifications
        :param snap_id: the unique snap id of the SnapVX
        :param loop: if looping call is required for handling retries
        """
        @coordination.synchronized("emc-snapvx-{src_device_id}")
        def do_unlink_volume(src_device_id):
            LOG.debug("Break snap vx link relationship between: %(src)s "
                      "and: %(tgt)s.",
                      {'src': src_device_id, 'tgt': target_device_id})

            self._unlink_volume(array, src_device_id, target_device_id,
                                snap_name, extra_specs, snap_id=snap_id,
                                list_volume_pairs=None, loop=loop)

        do_unlink_volume(source_device_id)

    def _unlink_volume(
            self, array, source_device_id, target_device_id, snap_name,
            extra_specs, snap_id=None, list_volume_pairs=None, loop=True):
        """Unlink a target volume from its source volume.

        :param array: the array serial number
        :param source_device_id: the source device id
        :param target_device_id: the target device id
        :param snap_name: the snap name
        :param extra_specs: extra specifications
        :param snap_id: the unique snap id of the SnapVX
        :param list_volume_pairs: list of volume pairs, optional
        :param loop: if looping call is required for handling retries
        :returns: return code
        """
        def _unlink_vol():
            """Called at an interval until the synchronization is finished.

            :raises: loopingcall.LoopingCallDone
            """
            # 'kwargs' is the mutable state dict bound by the closure
            # below (only defined on the looping-call path).
            retries = kwargs['retries']
            try:
                kwargs['retries'] = retries + 1
                if not kwargs['modify_vol_success']:
                    self.rest.modify_volume_snap(
                        array, source_device_id, target_device_id, snap_name,
                        extra_specs, snap_id=snap_id, unlink=True,
                        list_volume_pairs=list_volume_pairs)
                    kwargs['modify_vol_success'] = True
            except exception.VolumeBackendAPIException:
                # Swallow and retry until the retry cap is reached.
                pass

            if kwargs['retries'] > UNLINK_RETRIES:
                LOG.error("_unlink_volume failed after %(retries)d "
                          "tries.", {'retries': retries})
                # retvalue 30 signals exhaustion of all retries
                raise loopingcall.LoopingCallDone(retvalue=30)
            if kwargs['modify_vol_success']:
                raise loopingcall.LoopingCallDone()

        if not loop:
            # Single-shot: let any backend exception propagate.
            self.rest.modify_volume_snap(
                array, source_device_id, target_device_id, snap_name,
                extra_specs, snap_id=snap_id, unlink=True,
                list_volume_pairs=list_volume_pairs)
        else:
            kwargs = {'retries': 0,
                      'modify_vol_success': False}
            timer = loopingcall.FixedIntervalLoopingCall(_unlink_vol)
            rc = timer.start(interval=UNLINK_INTERVAL).wait()
            return rc

    def delete_volume_snap(self, array, snap_name,
                           source_device_ids, snap_id=None, restored=False):
        """Delete a snapVx snapshot of a volume.

        :param array: the array serial number
        :param snap_name: the snapshot name
        :param source_device_ids: the source device ids
        :param snap_id: the unique snap id of the SnapVX
        :param restored: Flag to indicate if restored session is being deleted
        """
        @coordination.synchronized("emc-snapvx-{src_device_id}")
        def do_delete_volume_snap(src_device_id):
            LOG.debug("Delete SnapVx: %(snap_name)s for source %(src)s and "
                      "devices %(devs)s.",
                      {'snap_name': snap_name, 'src': src_device_id,
                       'devs': source_device_ids})
            self.rest.delete_volume_snap(
                array, snap_name, source_device_ids, snap_id=snap_id,
                restored=restored)

        # Lock on the first device only; it represents the whole batch.
        device_id = source_device_ids[0] if isinstance(
            source_device_ids, list) else source_device_ids
        if snap_id is None:
            snap_id = self.rest.get_snap_id(array, device_id, snap_name)
        do_delete_volume_snap(device_id)

    def is_restore_complete(self, array, source_device_id,
                            snap_name, snap_id, extra_specs):
        """Check and wait for a restore to complete

        :param array: the array serial number
        :param source_device_id: source device id
        :param snap_name: snapshot name
        :param snap_id: unique snap id
        :param extra_specs: extra specification
        :returns: bool
        """
        def _wait_for_restore():
            """Called at an interval until the restore is finished.

            :raises: loopingcall.LoopingCallDone
            :raises: VolumeBackendAPIException
            """
            retries = kwargs['retries']
            try:
                kwargs['retries'] = retries + 1
                if not kwargs['wait_for_restore_called']:
                    if self._is_restore_complete(
                            array, source_device_id, snap_name, snap_id):
                        kwargs['wait_for_restore_called'] = True
            except Exception:
                exception_message = (_("Issue encountered waiting for "
                                       "restore."))
                LOG.exception(exception_message)
                raise exception.VolumeBackendAPIException(
                    message=exception_message)

            if kwargs['wait_for_restore_called']:
                raise loopingcall.LoopingCallDone()
            if kwargs['retries'] > int(extra_specs[utils.RETRIES]):
                LOG.error("_wait_for_restore failed after %(retries)d "
                          "tries.", {'retries': retries})
                raise loopingcall.LoopingCallDone(
                    retvalue=int(extra_specs[utils.RETRIES]))

        kwargs = {'retries': 0,
                  'wait_for_restore_called': False}
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_restore)
        rc = timer.start(interval=int(extra_specs[utils.INTERVAL])).wait()
        return rc

    def _is_restore_complete(
            self, array, source_device_id, snap_name, snap_id):
        """Helper function to check if restore is complete.

        :param array: the array serial number
        :param source_device_id: source device id
        :param snap_name: the snapshot name
        :param snap_id: unique snap id
        :returns: restored -- bool
        """
        restored = False
        snap_details = self.rest.get_volume_snap(
            array, source_device_id, snap_name, snap_id)
        if snap_details:
            linked_devices = snap_details.get("linkedDevices", [])
            for linked_device in linked_devices:
                # NOTE(review): the comparison is deliberately against the
                # *source* device id -- on a restore session the snapshot
                # reports the source as its linked targetDevice. Confirm
                # against the REST API docs before changing.
                if ('targetDevice' in linked_device and
                        source_device_id == linked_device['targetDevice']):
                    if ('state' in linked_device and
                            linked_device['state'] == "Restored"):
                        restored = True
        return restored

    def delete_temp_volume_snap(self, array, snap_name,
                                source_device_id, snap_id):
        """Delete the temporary snapshot created for clone operations.

        There can be instances where the source and target both attempt to
        delete a temp snapshot simultaneously, so we must lock the snap and
        then double check it is on the array.

        :param array: the array serial number
        :param snap_name: the snapshot name
        :param source_device_id: the source device id
        :param snap_id: the unique snap id of the SnapVX
        """
        snapvx = self.rest.get_volume_snap(
            array, source_device_id, snap_name, snap_id)
        if snapvx:
            self.delete_volume_snap(
                array, snap_name, source_device_id, snap_id=snap_id,
                restored=False)

    def delete_volume_snap_check_for_links(
            self, array, snap_name, source_devices, extra_specs, snap_id):
        """Check if a snap has any links before deletion.

        If a snapshot has any links, break the replication relationship
        before deletion.

        :param array: the array serial number
        :param snap_name: the snapshot name
        :param source_devices: the source device ids
        :param extra_specs: the extra specifications
        :param snap_id: the unique snap id of the SnapVX
        """
        list_device_pairs = []
        if not isinstance(source_devices, list):
            source_devices = [source_devices]
        for source_device in source_devices:
            LOG.debug("Check for linked devices to SnapVx: %(snap_name)s "
                      "for volume %(vol)s.",
                      {'vol': source_device, 'snap_name': snap_name})
            linked_list = self.rest.get_snap_linked_device_list(
                array, source_device, snap_name, snap_id)
            if len(linked_list) == 1:
                # Single link: batch it for one combined unlink call below.
                target_device = linked_list[0]['targetDevice']
                list_device_pairs.append((source_device, target_device))
            else:
                for link in linked_list:
                    # If a single source volume has multiple targets,
                    # we must unlink each target individually
                    target_device = link['targetDevice']
                    self._unlink_volume(
                        array, source_device, target_device, snap_name,
                        extra_specs, snap_id=snap_id)
        if list_device_pairs:
            self._unlink_volume(
                array, "", "", snap_name, extra_specs, snap_id=snap_id,
                list_volume_pairs=list_device_pairs)
        if source_devices:
            self.delete_volume_snap(
                array, snap_name, source_devices, snap_id, restored=False)

    def extend_volume(self, array, device_id, new_size, extra_specs,
                      rdf_group=None):
        """Extend a volume.

        :param array: the array serial number
        :param device_id: the volume device id
        :param new_size: the new size (GB)
        :param extra_specs: the extra specifications
        :param rdf_group: the rdf group number, if required
        :returns: status_code
        """
        start_time = time.time()
        if rdf_group:
            # Replicated volumes must serialize per RDF group.
            @coordination.synchronized('emc-rg-{rdf_group}')
            def _extend_replicated_volume(rdf_group):
                self.rest.extend_volume(array, device_id,
                                        new_size, extra_specs, rdf_group)
            _extend_replicated_volume(rdf_group)
        else:
            self.rest.extend_volume(array, device_id, new_size, extra_specs)
            LOG.debug("Extend PowerMax/VMAX volume took: %(delta)s H:MM:SS.",
                      {'delta': self.utils.get_time_delta(start_time,
                                                          time.time())})

    def get_srp_pool_stats(self, array, array_info):
        """Get the srp capacity stats.

        :param array: the array serial number
        :param array_info: the array dict
        :returns: total_capacity_gb
        :returns: remaining_capacity_gb
        :returns: subscribed_capacity_gb
        :returns: array_reserve_percent
        """
        total_capacity_gb = 0
        remaining_capacity_gb = 0
        subscribed_capacity_gb = 0
        array_reserve_percent = 0
        srp = array_info['srpName']
        LOG.debug(
            "Retrieving capacity for srp %(srpName)s on array %(array)s.",
            {'srpName': srp, 'array': array})

        srp_details = self.rest.get_srp_by_name(array, srp)
        if not srp_details:
            LOG.error("Unable to retrieve srp instance of %(srpName)s on "
                      "array %(array)s.",
                      {'srpName': srp, 'array': array})
            # NOTE(review): this error branch returns FIVE values while the
            # success path below returns four -- a caller unpacking four
            # values would raise here. Confirm caller expectations before
            # relying on this path.
            return 0, 0, 0, 0, False
        try:
            # Capacities are reported in TB; convert to GB (* 1024).
            srp_capacity = srp_details['srp_capacity']
            total_capacity_gb = srp_capacity['usable_total_tb'] * units.Ki
            try:
                used_capacity_gb = srp_capacity['usable_used_tb'] * units.Ki
                remaining_capacity_gb = float(
                    total_capacity_gb - used_capacity_gb)
            except KeyError:
                LOG.error("Unable to retrieve remaining_capacity_gb.")
            subscribed_capacity_gb = (
                srp_capacity['subscribed_total_tb'] * units.Ki)
            array_reserve_percent = srp_details['reserved_cap_percent']
        except KeyError:
            # Leave the zero defaults for any stat the backend omits.
            pass

        return (total_capacity_gb, remaining_capacity_gb,
                subscribed_capacity_gb, array_reserve_percent)

    def verify_slo_workload(
            self, array, slo, workload, is_next_gen=None, array_model=None):
        """Check if SLO and workload values are valid.

        :param array: the array serial number
        :param slo: Service Level Object e.g bronze
        :param workload: workload e.g DSS
        :param is_next_gen: can be None
        :param array_model: the array model, can be None
        :returns: boolean
        """
        is_valid_slo, is_valid_workload = False, False

        # 'NONE' (any case) is treated the same as an unset value.
        if workload and workload.lower() == 'none':
            workload = None

        if not workload:
            is_valid_workload = True

        if slo and slo.lower() == 'none':
            slo = None

        if is_next_gen or is_next_gen is None:
            array_model, is_next_gen = self.rest.get_array_model_info(
                array)
        valid_slos = self.rest.get_slo_list(array, is_next_gen, array_model)
        valid_workloads = self.rest.get_workload_settings(array, is_next_gen)
        for valid_slo in valid_slos:
            if slo == valid_slo:
                is_valid_slo = True
                break

        for valid_workload in valid_workloads:
            if workload == valid_workload:
                is_valid_workload = True
                break

        if not slo:
            is_valid_slo = True
            # A workload without an SLO is never valid.
            if workload:
                is_valid_workload = False

        if not is_valid_slo:
            LOG.error(
                "SLO: %(slo)s is not valid. Valid values are: "
                "%(valid_slos)s.", {'slo': slo, 'valid_slos': valid_slos})

        if not is_valid_workload:
            LOG.warning(
                "Workload: %(workload)s is not valid. Valid values are "
                "%(valid_workloads)s. Note you cannot "
                "set a workload without an SLO.",
                {'workload': workload, 'valid_workloads': valid_workloads})

        return is_valid_slo, is_valid_workload

    def get_slo_workload_settings_from_storage_group(
            self, array, sg_name):
        """Get slo and workload settings from a storage group.

        :param array: the array serial number
        :param sg_name: the storage group name
        :returns: storage group slo settings
        """
        slo = 'NONE'
        workload = 'NONE'
        storage_group = self.rest.get_storage_group(array, sg_name)
        if storage_group:
            try:
                slo = storage_group['slo']
                # Next-gen arrays no longer report a workload.
                workload = 'NONE' if self.rest.is_next_gen_array(array) else (
                    storage_group['workload'])
            except KeyError:
                pass
        else:
            exception_message = (_(
                "Could not retrieve storage group %(sg_name)s. ") %
                {'sg_name': sg_name})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(
                message=exception_message)
        return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload}

    @coordination.synchronized('emc-rg-{rdf_group}')
    def break_rdf_relationship(self, array, device_id, sg_name,
                               rdf_group, rep_extra_specs, state):
        """Break the rdf relationship between a pair of devices.

        Resuming replication after suspending is necessary where this function
        is called from. Doing so in here will disrupt the ability to perform
        further actions on the RDFG without suspending again.

        :param array: the array serial number
        :param device_id: the source device id
        :param sg_name: storage group
        :param rdf_group: the rdf group number
        :param rep_extra_specs: replication extra specs
        :param state: the state of the rdf pair
        """
        LOG.info("Suspending RDF group %(rdf)s to delete source device "
                 "%(dev)s RDF pair.", {'rdf': rdf_group, 'dev': device_id})

        # Wait out any in-flight sync, then suspend before deleting the pair.
        if state.lower() == utils.RDF_SYNCINPROG_STATE:
            self.rest.wait_for_rdf_pair_sync(
                array, rdf_group, device_id, rep_extra_specs)
        if state.lower() != utils.RDF_SUSPENDED_STATE:
            self.rest.srdf_suspend_replication(
                array, sg_name, rdf_group, rep_extra_specs)

        self.rest.srdf_delete_device_pair(array, rdf_group, device_id)

    def get_or_create_volume_group(self, array, group, extra_specs):
        """Get or create a volume group.

        Sometimes it may be necessary to recreate a volume group on the
        backend - for example, when the last member volume has been removed
        from the group, but the cinder group object has not been deleted.

        :param array: the array serial number
        :param group: the group object
        :param extra_specs: the extra specifications
        :returns: group name
        """
        vol_grp_name = self.utils.update_volume_group_name(group)
        return self.get_or_create_group(array, vol_grp_name, extra_specs)

    def get_or_create_group(self, array, group_name, extra_specs):
        """Get or create a generic volume group.

        :param array: the array serial number
        :param group_name: the group name
        :param extra_specs: the extra specifications
        :returns: group name
        """
        storage_group = self.rest.get_storage_group(array, group_name)
        if not storage_group:
            self.create_volume_group(array, group_name, extra_specs)
        return group_name

    def create_volume_group(self, array, group_name, extra_specs):
        """Create a generic volume group.

        :param array: the array serial number
        :param group_name: the name of the group
        :param extra_specs: the extra specifications
        :returns: volume_group
        """
        # Generic groups carry no SRP/SLO/workload settings.
        return self.create_storage_group(array, group_name,
                                         None, None, None, extra_specs)

    def create_group_replica(
            self, array, source_group, snap_name, extra_specs):
        """Create a replica (snapVx) of a volume group.

        :param array: the array serial number
        :param source_group: the source group name
        :param snap_name: the name for the snap shot
        :param extra_specs: extra specifications
        """
        LOG.debug("Creating Snap Vx snapshot of storage group: %(srcGroup)s.",
                  {'srcGroup': source_group})

        # Create snapshot
        self.rest.create_storagegroup_snap(
            array, source_group, snap_name, extra_specs)

    def delete_group_replica(self, array, snap_name, source_group_name):
        """Delete the snapshot.

        :param array: the array serial number
        :param snap_name: the name for the snap shot
        :param source_group_name: the source group name
        """
        LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
                  "snapshot: %(snap_name)s.",
                  {'srcGroup': source_group_name, 'snap_name': snap_name})
        snap_id_list = self.rest.get_storage_group_snap_id_list(
            array, source_group_name, snap_name)
        if snap_id_list:
            # NOTE(review): is_snap_id appears to be a capability flag
            # (attribute, not a call); when the array uses generation
            # numbers instead, delete newest-first -- confirm semantics.
            if not self.rest.is_snap_id:
                snap_id_list.sort(reverse=True)
            for snap_id in snap_id_list:
                self.rest.delete_storagegroup_snap(
                    array, source_group_name, snap_name, snap_id,
                    force=True)
        else:
            LOG.debug("Unable to get snap ids for: %(srcGroup)s.",
                      {'srcGroup': source_group_name})

    def link_and_break_replica(self, array, source_group_name,
                               target_group_name, snap_name, extra_specs,
                               list_volume_pairs, delete_snapshot=False,
                               snap_id=None):
        """Links a group snap and breaks the relationship.

        :param array: the array serial
        :param source_group_name: the source group name
        :param target_group_name: the target group name
        :param snap_name: the snapshot name
        :param extra_specs: extra specifications
        :param list_volume_pairs: the list of volume pairs
        :param delete_snapshot: delete snapshot flag
        :param snap_id: the unique snapVx identifier
        """
        LOG.debug("Linking Snap Vx snapshot: source group: %(srcGroup)s "
                  "targetGroup: %(tgtGroup)s.",
                  {'srcGroup': source_group_name,
                   'tgtGroup': target_group_name})
        # Link the snapshot
        self.rest.modify_volume_snap(
            array, None, None, snap_name, extra_specs, snap_id=snap_id,
            link=True, list_volume_pairs=list_volume_pairs)
        # Unlink the snapshot
        LOG.debug("Unlinking Snap Vx snapshot: source group: %(srcGroup)s "
                  "targetGroup: %(tgtGroup)s.",
                  {'srcGroup': source_group_name,
                   'tgtGroup': target_group_name})
        self._unlink_volume(
            array, None, None, snap_name, extra_specs, snap_id=snap_id,
            list_volume_pairs=list_volume_pairs)
        # Delete the snapshot if necessary
        if delete_snapshot:
            LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
                      "snapshot: %(snap_name)s.",
                      {'srcGroup': source_group_name,
                       'snap_name': snap_name})
            source_devices = [a for a, b in list_volume_pairs]
            self.delete_volume_snap(array, snap_name, source_devices)

    def revert_volume_snapshot(self, array, source_device_id,
                               snap_name, snap_id, extra_specs):
        """Revert a volume snapshot

        :param array: the array serial number
        :param source_device_id: device id of the source
        :param snap_name: snapvx snapshot name
        :param snap_id: the unique snap identifier
        :param extra_specs: the extra specifications
        """
        start_time = time.time()
        try:
            self.rest.modify_volume_snap(
                array, source_device_id, "", snap_name, extra_specs,
                snap_id=snap_id, restore=True)
        except exception.VolumeBackendAPIException as ex:
            # Translate the backend's "link not fully copied" failure into
            # actionable guidance; re-raise everything with context.
            if utils.REVERT_SS_EXC in ex.message:
                exception_message = _(
                    "Link must be fully copied for this operation to proceed. "
                    "Please reset the volume state from error to available "
                    "and wait for awhile before attempting another "
                    "revert to snapshot operation. You may want to delete "
                    "the latest snapshot taken in this revert to snapshot "
                    "operation, as you will only be able to revert to the "
                    "last snapshot.")
            else:
                exception_message = (_(
                    "Revert to snapshot failed with exception "
                    "%(e)s.") % {'e': ex})
            raise exception.VolumeBackendAPIException(
                message=exception_message)
        LOG.debug("Restore volume snapshot took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(start_time,
                                                      time.time())})
| openstack/cinder | cinder/volume/drivers/dell_emc/powermax/provision.py | Python | apache-2.0 | 32,178 |
# Call parse directory
import glob
import os
import subprocess
import time
import shutil
from utils.constants import Constants, PYTHON_CODE_FOLDER
# Interpreter used to launch the external topic-ensemble scripts.
PYTHON_COMMAND = 'python'
# Working sub-folders under the topic-model cache directory.
BASE_FOLDER = Constants.TOPIC_MODEL_FOLDER + 'base/'
CORPUS_FOLDER = Constants.TOPIC_MODEL_FOLDER + 'corpus/'
# TODO: Consider moving this to the Constants class
# Superseded constant/helper kept for reference:
# DATASET_FILE_NAME = Constants.generate_file_name(
#     'topic_ensemble_corpus', '', CORPUS_FOLDER, None, None, False)[:-1]
# def get_dataset_file_name():
#     return Constants.CACHE_FOLDER + Constants.ITEM_TYPE + '_' + \
#         Constants.TOPIC_MODEL_TARGET_REVIEWS + '_document_term_matrix'
def get_dataset_file_name():
    """Return the base path (no extension) of the parsed corpus file."""
    # Drop the last character of the generated name (presumably a
    # trailing separator added by generate_file_name -- the other
    # helpers in this module do the same).
    file_name = Constants.generate_file_name(
        'topic_ensemble_corpus', '', CORPUS_FOLDER, None, None, False)
    return file_name[:-1]
# TODO: Consider moving this to the Constants class
def get_topic_model_prefix(folder='', seed=None):
    """Return the topic-model file prefix, optionally qualified by seed.

    :param folder: folder to prepend to the generated name
    :param seed: optional random seed embedded in the name
    """
    base_name = ('topic_model' if seed is None
                 else 'topic_model_seed-' + str(seed))
    return Constants.generate_file_name(
        base_name, '', folder, None, None, True, True)[:-1]
def run_parse_directory():
    """Run topic-ensemble's parse-directory.py over the generated text files.

    Builds a normalized tf-idf corpus rooted at get_dataset_file_name().
    Blocks until the external process finishes.
    """
    parse_directory_command = Constants.TOPIC_ENSEMBLE_FOLDER + \
        'parse-directory.py'
    command = [
        PYTHON_COMMAND,
        parse_directory_command,
        Constants.GENERATED_TEXT_FILES_FOLDER,
        '-o',
        get_dataset_file_name(),
        '--tfidf',
        '--norm',
    ]
    print(command)
    # Run from the topic-ensemble folder, presumably so the script's
    # relative imports/resources resolve -- verify if relocating.
    p = subprocess.Popen(command, cwd=Constants.TOPIC_ENSEMBLE_FOLDER)
    p.wait()
def run_local_parse_directory():
    """Build the local tf-idf corpus from the generated text files.

    Does nothing if the corpus appears to have been parsed already.
    Blocks until the external process finishes.
    """
    # NOTE(review): this checks the extensionless base name, while the
    # consumers read get_dataset_file_name() + '.pkl' -- confirm the
    # parser actually creates a file at this exact path.
    if os.path.exists(get_dataset_file_name()):
        print('The corpus directory has already been parsed')
        return

    parse_directory_command = \
        PYTHON_CODE_FOLDER + 'topicmodeling/belford_tfidf.py'

    # Create the folder chain top-down; mkdir needs the parent to exist.
    for folder in (Constants.TOPIC_MODEL_FOLDER, BASE_FOLDER, CORPUS_FOLDER):
        if not os.path.isdir(folder):
            os.mkdir(folder)

    command = [
        PYTHON_COMMAND,
        parse_directory_command,
        Constants.GENERATED_TEXT_FILES_FOLDER,
        '-o',
        get_dataset_file_name(),
        '--tfidf',
        '--norm',
        '--df',
        str(Constants.MIN_DICTIONARY_WORD_COUNT),
        '--minlen',
        '10',
    ]
    print(command)

    parser_process = subprocess.Popen(
        command, cwd=Constants.TOPIC_ENSEMBLE_FOLDER)
    parser_process.wait()
# Call generate nmf or generate kfold
def run_generate_nmf():
    """Run topic-ensemble's generate-nmf.py to build a single NMF topic
    model from the parsed corpus.
    """
    script_path = Constants.TOPIC_ENSEMBLE_FOLDER + 'generate-nmf.py'
    output_folder = BASE_FOLDER + get_topic_model_prefix() + '/'

    # Create the folder hierarchy, parent first (mkdir is not recursive).
    for folder in (Constants.TOPIC_MODEL_FOLDER, BASE_FOLDER, output_folder):
        if not os.path.isdir(folder):
            os.mkdir(folder)

    command = [
        PYTHON_COMMAND, script_path,
        get_dataset_file_name() + '.pkl',
        '-k', str(Constants.TOPIC_MODEL_NUM_TOPICS),
        '-r', str(Constants.TOPIC_MODEL_PASSES),
        '-o', output_folder,
    ]
    print(command)
    process = subprocess.Popen(command, cwd=Constants.TOPIC_ENSEMBLE_FOLDER)
    process.wait()
# Call generate kfold
def run_generate_kfold(seed=None):
    """Run topic-ensemble's generate-kfold.py to build k-fold NMF topic
    models, optionally with a fixed random seed.
    """
    script_path = Constants.TOPIC_ENSEMBLE_FOLDER + 'generate-kfold.py'
    output_folder = BASE_FOLDER + get_topic_model_prefix(seed=seed) + '/'

    # Create the folder hierarchy, parent first (mkdir is not recursive).
    for folder in (Constants.TOPIC_MODEL_FOLDER, BASE_FOLDER, output_folder):
        if not os.path.isdir(folder):
            os.mkdir(folder)

    command = [
        PYTHON_COMMAND, script_path,
        get_dataset_file_name() + '.pkl',
        '-k', str(Constants.TOPIC_MODEL_NUM_TOPICS),
        '-r', str(Constants.TOPIC_MODEL_PASSES),
        '-f', str(Constants.TOPIC_MODEL_FOLDS),
        '-o', output_folder,
    ]
    if seed is not None:
        command += ['--seed', str(seed)]
    print(command)
    process = subprocess.Popen(command, cwd=Constants.TOPIC_ENSEMBLE_FOLDER)
    process.wait()
# Call combine nmf
def run_combine_nmf(seed=None):
    """Combine the k-fold factor files into an ensemble topic model with
    topic-ensemble's combine-nmf.py, then delete the intermediate folder.
    """
    script_path = Constants.TOPIC_ENSEMBLE_FOLDER + 'combine-nmf.py'
    base_topic_folder = get_topic_model_prefix(seed=seed)
    factor_files = glob.glob(
        BASE_FOLDER + base_topic_folder + '/*factors*.pkl')
    output_folder = \
        get_topic_model_prefix(Constants.ENSEMBLE_FOLDER, seed) + '/'

    # Create the folder hierarchy, parent first (mkdir is not recursive).
    for folder in (Constants.ENSEMBLE_FOLDER, output_folder):
        if not os.path.isdir(folder):
            os.mkdir(folder)

    command = [
        PYTHON_COMMAND, script_path, get_dataset_file_name() + '.pkl']
    command += factor_files
    command += [
        '-k', str(Constants.TOPIC_MODEL_NUM_TOPICS),
        '-o', output_folder,
    ]
    if seed is not None:
        command += ['--seed', str(seed)]
    print(command)
    process = subprocess.Popen(command, cwd=Constants.TOPIC_ENSEMBLE_FOLDER)
    process.wait()

    # The per-fold factors are no longer needed once they are combined.
    shutil.rmtree(BASE_FOLDER + base_topic_folder)
def create_several_topic_models():
    """Build one ensemble topic model per random seed, for stability
    analysis across Constants.TOPIC_MODEL_STABILITY_ITERATIONS runs.
    """
    run_local_parse_directory()
    total_topic_models = Constants.TOPIC_MODEL_STABILITY_ITERATIONS
    for seed in range(1, total_topic_models + 1):
        print('\n\n\nCreating %d of %d topic models' %
              (seed, total_topic_models))
        run_generate_kfold(seed)
        run_combine_nmf(seed)
def main():
    """Parse the corpus, then build and combine a single k-fold topic
    model. (Alternative steps are left commented out for manual runs.)
    """
    # run_parse_directory()
    run_local_parse_directory()
    # run_generate_nmf()
    run_generate_kfold()
    run_combine_nmf()
# create_several_topic_models()
# start = time.time()
# main()
# end = time.time()
# total_time = end - start
# print("Total time = %f seconds" % total_time)
| melqkiades/yelp | source/python/topicmodeling/topic_ensemble_caller.py | Python | lgpl-2.1 | 6,119 |
import argparse
import importlib
import inspect
import os
import sys
import traceback
from bokeh.plotting import output_file, show
from fabulous.color import bold, red
from app import create_app
def error(msg, stacktrace=None):
    """Print an error message (preceded by a stack trace, if one is
    given) and terminate the script with exit status 1.

    Params:
    -------
    msg: str
        Error message, printed in bold red.
    stacktrace: str
        Optional stack trace, printed before the message.
    """
    if stacktrace:
        print(stacktrace)
    highlighted = bold(red(msg))
    print(highlighted)
    sys.exit(1)
# get command line arguments
parser = argparse.ArgumentParser(description='Test a Bokeh model.')
parser.add_argument('module_file',
                    type=str,
                    help='Python file containing the Bokeh model')
parser.add_argument('model_function',
                    type=str,
                    help='Function returning the Bokeh model')
parser.add_argument('func_args',
                    type=str,
                    nargs='*',
                    help='Arguments to pass to the model function')
args = parser.parse_args()

# directory in which this script is located
# note: this must not be app or a subdirectory thereof
this_file = os.path.realpath(__file__)
base_dir = os.path.abspath(os.path.join(this_file, os.path.pardir))

# ensure the given module file is a Python file
module_file = os.path.abspath(args.module_file)
if not module_file.lower().endswith('.py'):
    error('The module filename must end with ".py".')

# find the path of the module file relative to the base directory of the project
module_path = os.path.relpath(module_file, os.path.commonprefix([module_file, this_file]))

# convert the path into a module name (remove ".py" and replace separators with dots)
module = module_path[:-3].replace(os.path.sep, '.')

# import the module and find the requested function to test
# NOTE: catch Exception rather than a bare except so that SystemExit and
# KeyboardInterrupt still propagate; include the traceback so that import
# failures inside the module itself are diagnosable.
try:
    imported_module = importlib.import_module(module, __package__)
except Exception:
    error('The module {module} couldn\'t be imported. Does the model exist?'.format(module=module),
          traceback.format_exc())
functions = [member for member in inspect.getmembers(imported_module) if member[0] == args.model_function]
if not functions:
    error('There is no function "{func}" defined in {module}'.format(func=args.model_function, module=module))
if len(functions) > 1:
    error('The name "{func}" is ambiguous in the module {module}.'.format(func=args.model_function, module=module))

# set up Flask app context
# if we don't do this SQLAlchemy will fail
app = create_app(os.getenv('FLASK_CONFIG') or 'development')
app_context = app.app_context()
app_context.push()

# get the Bokeh model by calling the requested function with the
# (string-valued) command line arguments
func = functions[0][1]
try:
    model = func(*args.func_args)
except Exception:
    error('The call to function "{func}" failed.'.format(func=args.model_function),
          traceback.format_exc(1))

# output the model
output_file('/tmp/bokeh_test.html')
try:
    show(model)
except Exception:
    error('The Bokeh model couldn\'t be output. (Is your function returning a Bokeh model?)',
          traceback.format_exc())

# clean up
app_context.pop()
| saltastro/salt-data-quality-site | test_bokeh_model.py | Python | mit | 2,995 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The python3 plugin can be used for python 3 based parts.
The python3 plugin can be used for python 3 projects where you would
want to do:
- import python modules with a requirements.txt
- build a python project that has a setup.py
- install sources straight from pip
This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.
Additionally, this plugin uses the following plugin-specific keywords:
- requirements:
(string)
path to a requirements.txt file
- python-packages:
(list)
A list of dependencies to get from PyPi
"""
import os
import snapcraft
class Python3Plugin(snapcraft.BasePlugin):
    """Snapcraft plugin for python 3 based parts.

    Python dependencies are installed with pip during the pull step, from
    a requirements.txt file, an explicit python-packages list and/or the
    part's own setup.py.
    """

    @classmethod
    def schema(cls):
        """Return the part schema extended with the plugin-specific
        'requirements' and 'python-packages' keywords.
        """
        schema = super().schema()
        schema['properties']['requirements'] = {
            'type': 'string',
        }
        schema['properties']['python-packages'] = {
            'type': 'array',
            'minitems': 1,
            'uniqueItems': True,
            'items': {
                'type': 'string'
            },
            'default': [],
        }
        # Neither plugin keyword is mandatory.
        schema.pop('required')

        # Inform Snapcraft of the properties associated with pulling. If these
        # change in the YAML Snapcraft will consider the pull step dirty.
        schema['pull-properties'].extend(['requirements', 'python-packages'])

        return schema

    def __init__(self, name, options, project):
        super().__init__(name, options, project)
        # Stage the interpreter headers plus setuptools so that pip can be
        # bootstrapped and C extensions can be built.
        self.stage_packages.extend([
            'python3-dev',
            'python3-pkg-resources',
            'python3-setuptools',
        ])

    def env(self, root):
        """Return the build environment for the staged python tree rooted
        at `root`: the import path plus compiler include flags.
        """
        return [
            'PYTHONPATH={}'.format(os.path.join(
                root, 'usr', 'lib', self.python_version, 'dist-packages')),
            # This is until we figure out how to get pip to download only
            # and then build in the build step or split out pulling
            # stage-packages in an internal private step.
            'CPPFLAGS="-I{} $CPPFLAGS"'.format(os.path.join(
                root, 'usr', 'include')),
            'CFLAGS="-I{} $CFLAGS"'.format(os.path.join(
                root, 'usr', 'include')),
        ]

    def pull(self):
        """Pull the part's sources, then fetch python dependencies."""
        super().pull()
        self._pip()

    def _pip(self):
        """Bootstrap pip via easy_install and install the requirements
        file, the python-packages list and/or the local setup.py project
        into the part's site-packages directory.
        """
        setup = 'setup.py'
        if os.listdir(self.sourcedir):
            setup = os.path.join(self.sourcedir, 'setup.py')
        if self.options.requirements:
            requirements = os.path.join(os.getcwd(), self.options.requirements)

        # Nothing to do: no setup.py and no declared python dependencies.
        if not os.path.exists(setup) and not \
                (self.options.requirements or self.options.python_packages):
            return

        easy_install = os.path.join(
            self.installdir, 'usr', 'bin', 'easy_install3')
        prefix = os.path.join(self.installdir, 'usr')
        site_packages_dir = os.path.join(
            prefix, 'lib', self.python_version, 'site-packages')

        # If site-packages doesn't exist, make sure it points to the
        # python3 dist-packages (this is a relative link so that it's still
        # valid when the .snap is installed). Note that all python3 versions
        # share the same dist-packages (e.g. in python3, not python3.4).
        if not os.path.exists(site_packages_dir):
            os.symlink(os.path.join('..', 'python3', 'dist-packages'),
                       site_packages_dir)

        self.run(['python3', easy_install, '--prefix', prefix, 'pip'])
        pip3 = os.path.join(self.installdir, 'usr', 'bin', 'pip3')
        pip_install = ['python3', pip3, 'install', '--target',
                       site_packages_dir]

        if self.options.requirements:
            self.run(pip_install + ['--requirement', requirements])

        if self.options.python_packages:
            self.run(pip_install + ['--upgrade'] +
                     self.options.python_packages)

        if os.path.exists(setup):
            self.run(pip_install + ['.', ], cwd=self.sourcedir)

    def build(self):
        """Install the project's setup.py (if present) into the part's
        install directory using the Debian layout.
        """
        super().build()
        # If setuptools is used, it tries to create files in the
        # dist-packages dir and import from there, so it needs to exist
        # and be in the PYTHONPATH. It's harmless if setuptools isn't
        # used.
        setup_file = os.path.join(self.builddir, 'setup.py')
        if not os.path.exists(setup_file):
            return

        os.makedirs(self.dist_packages_dir, exist_ok=True)
        self.run(
            ['python3', setup_file, 'install', '--install-layout=deb',
             '--prefix={}/usr'.format(self.installdir)], cwd=self.builddir)

    @property
    def dist_packages_dir(self):
        # Debian-layout package directory inside the part's install tree.
        return os.path.join(
            self.installdir, 'usr', 'lib', self.python_version,
            'dist-packages')

    @property
    def python_version(self):
        # Presumably something like 'python3.4' (the default python3
        # reported by py3versions) — TODO confirm the exact format.
        return self.run_output(['py3versions', '-d'])

    def snap_fileset(self):
        """Exclude pip itself and compiled bytecode caches from the snap."""
        fileset = super().snap_fileset()
        fileset.append('-usr/bin/pip*')
        # Exclude __pycache__/*.pyc at every nesting depth below
        # usr/lib/python* (the original spelled out ten glob patterns by
        # hand; generate the same ten in order instead).
        for depth in range(10):
            fileset.append(
                '-usr/lib/python*/' + '*/' * depth + '__pycache__/*.pyc')
        return fileset
| didrocks/snapcraft | snapcraft/plugins/python3.py | Python | gpl-3.0 | 6,466 |
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: a community-supported module in
# 'preview' status (its interface is not yet guaranteed to be stable).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_container
short_description: manage docker containers
description:
- Manage the life cycle of docker containers.
- Supports check mode. Run with --check and --diff to view config difference and list of actions to be taken.
version_added: "2.1"
options:
auto_remove:
description:
- enable auto-removal of the container on daemon side when the container's process exits
type: bool
default: no
version_added: "2.4"
blkio_weight:
description:
- Block IO (relative weight), between 10 and 1000.
type: int
capabilities:
description:
- List of capabilities to add to the container.
type: list
cap_drop:
description:
- List of capabilities to drop from the container.
type: list
version_added: "2.7"
cleanup:
description:
- Use with I(detach=false) to remove the container after successful execution.
type: bool
default: no
version_added: "2.2"
command:
description:
- Command to execute when the container starts.
A command may be either a string or a list.
- Prior to version 2.4, strings were split on commas.
type: raw
comparisons:
description:
- Allows to specify how properties of existing containers are compared with
module options to decide whether the container should be recreated / updated
or not. Only options which correspond to the state of a container as handled
by the Docker daemon can be specified.
- Must be a dictionary specifying for an option one of the keys C(strict), C(ignore)
and C(allow_more_present).
- If C(strict) is specified, values are tested for equality, and changes always
result in updating or restarting. If C(ignore) is specified, changes are ignored.
- C(allow_more_present) is allowed only for lists, sets and dicts. If it is
specified for lists or sets, the container will only be updated or restarted if
the module option contains a value which is not present in the container's
options. If the option is specified for a dict, the container will only be updated
or restarted if the module option contains a key which isn't present in the
container's option, or if the value of a key present differs.
- The wildcard option C(*) can be used to set one of the default values C(strict)
or C(ignore) to I(all) comparisons.
- See the examples for details.
type: dict
version_added: "2.8"
cpu_period:
description:
- Limit CPU CFS (Completely Fair Scheduler) period
type: int
cpu_quota:
description:
- Limit CPU CFS (Completely Fair Scheduler) quota
type: int
cpuset_cpus:
description:
- CPUs in which to allow execution C(1,3) or C(1-3).
type: str
cpuset_mems:
description:
- Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1)
type: str
cpu_shares:
description:
- CPU shares (relative weight).
type: int
detach:
description:
- Enable detached mode to leave the container running in background.
If disabled, the task will reflect the status of the container run (failed if the command failed).
type: bool
default: yes
devices:
description:
- "List of host device bindings to add to the container. Each binding is a mapping expressed
in the format: <path_on_host>:<path_in_container>:<cgroup_permissions>"
type: list
device_read_bps:
description:
- "List of device path and read rate (bytes per second) from device."
type: list
suboptions:
path:
description:
- Device path in the container.
type: str
required: yes
rate:
description:
- "Device read limit. Format: <number>[<unit>]"
- "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)"
- "Omitting the unit defaults to bytes."
type: str
required: yes
version_added: "2.8"
device_write_bps:
description:
- "List of device and write rate (bytes per second) to device."
type: list
suboptions:
path:
description:
- Device path in the container.
type: str
required: yes
rate:
description:
- "Device read limit. Format: <number>[<unit>]"
- "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)"
- "Omitting the unit defaults to bytes."
type: str
required: yes
version_added: "2.8"
device_read_iops:
description:
- "List of device and read rate (IO per second) from device."
type: list
suboptions:
path:
description:
- Device path in the container.
type: str
required: yes
rate:
description:
- "Device read limit."
- "Must be a positive integer."
type: int
required: yes
version_added: "2.8"
device_write_iops:
description:
- "List of device and write rate (IO per second) to device."
type: list
suboptions:
path:
description:
- Device path in the container.
type: str
required: yes
rate:
description:
- "Device read limit."
- "Must be a positive integer."
type: int
required: yes
version_added: "2.8"
dns_opts:
description:
- list of DNS options
type: list
dns_servers:
description:
- List of custom DNS servers.
type: list
dns_search_domains:
description:
- List of custom DNS search domains.
type: list
domainname:
description:
- Container domainname.
type: str
version_added: "2.5"
env:
description:
- Dictionary of key,value pairs.
- Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (e.g. C("true")) in order to avoid data loss.
type: dict
env_file:
description:
- Path to a file, present on the target, containing environment variables I(FOO=BAR).
- If variable also present in C(env), then C(env) value will override.
type: path
version_added: "2.2"
entrypoint:
description:
- Command that overwrites the default ENTRYPOINT of the image.
type: list
etc_hosts:
description:
- Dict of host-to-IP mappings, where each host name is a key in the dictionary.
Each host name will be added to the container's /etc/hosts file.
type: dict
exposed_ports:
description:
- List of additional container ports which informs Docker that the container
listens on the specified network ports at runtime.
If the port is already exposed using EXPOSE in a Dockerfile, it does not
need to be exposed again.
type: list
aliases:
- exposed
- expose
force_kill:
description:
- Use the kill command when stopping a running container.
type: bool
default: no
aliases:
- forcekill
groups:
description:
- List of additional group names and/or IDs that the container process will run as.
type: list
healthcheck:
description:
- 'Configure a check that is run to determine whether or not containers for this service are "healthy".
See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
for details on how healthchecks work.'
- 'I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)'
type: dict
suboptions:
test:
description:
- Command to run to check health.
- Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
type: raw
interval:
description:
- 'Time between running the check. (default: 30s)'
type: str
timeout:
description:
- 'Maximum time to allow one check to run. (default: 30s)'
type: str
retries:
description:
        - 'Consecutive failures needed to report unhealthy. It accepts an integer value. (default: 3)'
type: int
start_period:
description:
- 'Start period for the container to initialize before starting health-retries countdown. (default: 0s)'
type: str
version_added: "2.8"
hostname:
description:
- Container hostname.
type: str
ignore_image:
description:
- When C(state) is I(present) or I(started) the module compares the configuration of an existing
container to requested configuration. The evaluation includes the image version. If
the image version in the registry does not match the container, the container will be
recreated. Stop this behavior by setting C(ignore_image) to I(True).
- I(Warning:) This option is ignored if C(image) or C(*) is used for the C(comparisons) option.
type: bool
default: no
version_added: "2.2"
image:
description:
- Repository path and tag used to create the container. If an image is not found or pull is true, the image
will be pulled from the registry. If no tag is included, C(latest) will be used.
- Can also be an image ID. If this is the case, the image is assumed to be available locally.
The C(pull) option is ignored for this case.
type: str
init:
description:
- Run an init inside the container that forwards signals and reaps processes.
This option requires Docker API 1.25+.
type: bool
default: no
version_added: "2.6"
interactive:
description:
- Keep stdin open after a container is launched, even if not attached.
type: bool
default: no
ipc_mode:
description:
- Set the IPC mode for the container. Can be one of 'container:<name|id>' to reuse another
container's IPC namespace or 'host' to use the host's IPC namespace within the container.
type: str
keep_volumes:
description:
- Retain volumes associated with a removed container.
type: bool
default: yes
kill_signal:
description:
- Override default signal used to kill a running container.
type: str
kernel_memory:
description:
- "Kernel memory limit (format: C(<number>[<unit>])). Number is a positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)."
- Omitting the unit defaults to bytes.
type: str
labels:
description:
- Dictionary of key value pairs.
type: dict
links:
description:
- List of name aliases for linked containers in the format C(container_name:alias).
- Setting this will force container to be restarted.
type: list
log_driver:
description:
- Specify the logging driver. Docker uses I(json-file) by default.
- See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices.
type: str
log_options:
description:
- Dictionary of options specific to the chosen log_driver. See https://docs.docker.com/engine/admin/logging/overview/
for details.
type: dict
aliases:
- log_opt
mac_address:
description:
- Container MAC address (e.g. 92:d0:c6:0a:29:33)
type: str
memory:
description:
- "Memory limit (format: C(<number>[<unit>])). Number is a positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- Omitting the unit defaults to bytes.
type: str
default: '0'
memory_reservation:
description:
- "Memory soft limit (format: C(<number>[<unit>])). Number is a positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- Omitting the unit defaults to bytes.
type: str
memory_swap:
description:
- "Total memory limit (memory + swap, format: C(<number>[<unit>])).
Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B),
C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)."
- Omitting the unit defaults to bytes.
type: str
memory_swappiness:
description:
- Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
- If not set, the value will be remain the same if container exists and will be inherited from the host machine if it is (re-)created.
type: int
name:
description:
- Assign a name to a new container or match an existing container.
- When identifying an existing container name may be a name or a long or short container ID.
type: str
required: yes
network_mode:
description:
- Connect the container to a network. Choices are "bridge", "host", "none" or "container:<name|id>"
type: str
userns_mode:
description:
- Set the user namespace mode for the container. Currently, the only valid value is C(host).
type: str
version_added: "2.5"
networks:
description:
- List of networks the container belongs to.
- For examples of the data structure and usage see EXAMPLES below.
- To remove a container from one or more networks, use the C(purge_networks) option.
- Note that as opposed to C(docker run ...), M(docker_container) does not remove the default
        network if C(networks) is specified. You need to explicitly use C(purge_networks) to enforce
the removal of the default network (and all other networks not explicitly mentioned in C(networks)).
type: list
suboptions:
name:
description:
- The network's name.
type: str
required: yes
ipv4_address:
description:
- The container's IPv4 address in this network.
type: str
ipv6_address:
description:
- The container's IPv6 address in this network.
type: str
links:
description:
- A list of containers to link to.
type: list
aliases:
description:
- List of aliases for this container in this network. These names
can be used in the network to reach this container.
type: list
version_added: "2.2"
oom_killer:
description:
- Whether or not to disable OOM Killer for the container.
type: bool
oom_score_adj:
description:
- An integer value containing the score given to the container in order to tune OOM killer preferences.
type: int
version_added: "2.2"
output_logs:
description:
      - If set to true, output of the container command will be printed (only effective when log_driver is set to json-file or journald).
type: bool
default: no
version_added: "2.7"
paused:
description:
- Use with the started state to pause running processes inside the container.
type: bool
default: no
pid_mode:
description:
- Set the PID namespace mode for the container.
- Note that docker-py < 2.0 only supports 'host'. Newer versions allow all values supported by the docker daemon.
type: str
pids_limit:
description:
- Set PIDs limit for the container. It accepts an integer value.
- Set -1 for unlimited PIDs.
type: int
version_added: "2.8"
privileged:
description:
- Give extended privileges to the container.
type: bool
default: no
published_ports:
description:
- List of ports to publish from the container to the host.
- "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
container port, 9000 is a host port, and 0.0.0.0 is a host interface."
- Port ranges can be used for source and destination ports. If two ranges with
different lengths are specified, the shorter range will be used.
- "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are I(not) allowed. This
is different from the C(docker) command line utility. Use the L(dig lookup,../lookup/dig.html)
to resolve hostnames."
- Container ports must be exposed either in the Dockerfile or via the C(expose) option.
- A value of C(all) will publish all exposed container ports to random host ports, ignoring
any other mappings.
- If C(networks) parameter is provided, will inspect each network to see if there exists
a bridge network with optional parameter com.docker.network.bridge.host_binding_ipv4.
If such a network is found, then published ports where no host IP address is specified
will be bound to the host IP pointed to by com.docker.network.bridge.host_binding_ipv4.
Note that the first bridge network with a com.docker.network.bridge.host_binding_ipv4
value encountered in the list of C(networks) is the one that will be used.
type: list
aliases:
- ports
pull:
description:
- If true, always pull the latest version of an image. Otherwise, will only pull an image
when missing.
- I(Note) that images are only pulled when specified by name. If the image is specified
as a image ID (hash), it cannot be pulled.
type: bool
default: no
purge_networks:
description:
- Remove the container from ALL networks not included in C(networks) parameter.
- Any default networks such as I(bridge), if not found in C(networks), will be removed as well.
type: bool
default: no
version_added: "2.2"
read_only:
description:
- Mount the container's root file system as read-only.
type: bool
default: no
recreate:
description:
- Use with present and started states to force the re-creation of an existing container.
type: bool
default: no
restart:
description:
- Use with started state to force a matching container to be stopped and restarted.
type: bool
default: no
restart_policy:
description:
- Container restart policy. Place quotes around I(no) option.
type: str
choices:
- 'no'
- 'on-failure'
- 'always'
- 'unless-stopped'
restart_retries:
description:
- Use with restart policy to control maximum number of restart attempts.
type: int
runtime:
description:
- Runtime to use for the container.
type: str
version_added: "2.8"
shm_size:
description:
- "Size of C(/dev/shm) (format: C(<number>[<unit>])). Number is positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- Omitting the unit defaults to bytes. If you omit the size entirely, the system uses C(64M).
type: str
security_opts:
description:
- List of security options in the form of C("label:user:User")
type: list
state:
description:
- 'I(absent) - A container matching the specified name will be stopped and removed. Use force_kill to kill the container
rather than stopping it. Use keep_volumes to retain volumes associated with the removed container.'
- 'I(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
container matches the name, a container will be created. If a container matches the name but the provided configuration
does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
with the requested config. Image version will be taken into account when comparing configuration. To ignore image
version use the ignore_image option. Use the recreate option to force the re-creation of the matching container. Use
force_kill to kill the container rather than stopping it. Use keep_volumes to retain volumes associated with a removed
container.'
- 'I(started) - Asserts there is a running container matching the name and any provided configuration. If no container
matches the name, a container will be created and started. If a container matching the name is found but the
configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed
and a new container will be created with the requested configuration and started. Image version will be taken into
account when comparing configuration. To ignore image version use the ignore_image option. Use recreate to always
re-create a matching container, even if it is running. Use restart to force a matching container to be stopped and
restarted. Use force_kill to kill a container rather than stopping it. Use keep_volumes to retain volumes associated
with a removed container.'
- 'I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped
state. Use force_kill to kill a container rather than stopping it.'
type: str
default: started
choices:
- absent
- present
- stopped
- started
stop_signal:
description:
- Override default signal used to stop the container.
type: str
stop_timeout:
description:
- Number of seconds to wait for the container to stop before sending SIGKILL.
When the container is created by this module, its C(StopTimeout) configuration
will be set to this value.
- When the container is stopped, will be used as a timeout for stopping the
container. In case the container has a custom C(StopTimeout) configuration,
the behavior depends on the version of docker. New versions of docker will
always use the container's configured C(StopTimeout) value if it has been
configured.
type: int
trust_image_content:
description:
- If C(yes), skip image verification.
type: bool
default: no
tmpfs:
description:
- Mount a tmpfs directory
type: list
version_added: 2.4
tty:
description:
- Allocate a pseudo-TTY.
type: bool
default: no
ulimits:
description:
- "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)"
type: list
sysctls:
description:
- Dictionary of key,value pairs.
type: dict
version_added: 2.4
user:
description:
- Sets the username or UID used and optionally the groupname or GID for the specified command.
- "Can be [ user | user:group | uid | uid:gid | user:gid | uid:group ]"
type: str
uts:
description:
- Set the UTS namespace mode for the container.
type: str
volumes:
description:
- List of volumes to mount within the container.
- "Use docker CLI-style syntax: C(/host:/container[:mode])"
- "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent),
C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave).
Note that docker might not support all modes and combinations of such modes."
- SELinux hosts can additionally use C(z) or C(Z) to use a shared or
private label for the volume.
- "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw),
C(z), and C(Z)."
type: list
volume_driver:
description:
- The container volume driver.
type: str
volumes_from:
description:
- List of container names or Ids to get volumes from.
type: list
working_dir:
description:
- Path to the working directory.
type: str
version_added: "2.4"
extends_documentation_fragment:
- docker
- docker.docker_py_1_documentation
author:
- "Cove Schneider (@cove)"
- "Joshua Conner (@joshuaconner)"
- "Pavel Antonov (@softzilla)"
- "Thomas Steinbach (@ThomasSteinbach)"
- "Philippe Jandot (@zfil)"
- "Daan Oosterveld (@dusdanig)"
- "Chris Houseknecht (@chouseknecht)"
- "Kassian Sun (@kassiansun)"
requirements:
- "docker-py >= 1.8.0"
- "Docker API >= 1.20"
'''
EXAMPLES = '''
- name: Create a data container
docker_container:
name: mydata
image: busybox
volumes:
- /data
- name: Re-create a redis container
docker_container:
name: myredis
image: redis
command: redis-server --appendonly yes
state: present
recreate: yes
exposed_ports:
- 6379
volumes_from:
- mydata
- name: Restart a container
docker_container:
name: myapplication
image: someuser/appimage
state: started
restart: yes
links:
- "myredis:aliasedredis"
devices:
- "/dev/sda:/dev/xvda:rwm"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
env:
SECRET_KEY: "ssssh"
# Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted
BOOLEAN_KEY: "yes"
- name: Container present
docker_container:
name: mycontainer
state: present
image: ubuntu:14.04
command: sleep infinity
- name: Stop a container
docker_container:
name: mycontainer
state: stopped
- name: Start 4 load-balanced containers
docker_container:
name: "container{{ item }}"
recreate: yes
image: someuser/anotherappimage
command: sleep 1d
with_sequence: count=4
- name: remove container
docker_container:
name: ohno
state: absent
- name: Syslogging output
docker_container:
name: myservice
image: busybox
log_driver: syslog
log_options:
syslog-address: tcp://my-syslog-server:514
syslog-facility: daemon
# NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag";
# for older docker installs, use "syslog-tag" instead
tag: myservice
- name: Create db container and connect to network
docker_container:
name: db_test
image: "postgres:latest"
networks:
- name: "{{ docker_network_name }}"
- name: Start container, connect to network and link
docker_container:
name: sleeper
image: ubuntu:14.04
networks:
- name: TestingNet
ipv4_address: "172.1.1.100"
aliases:
- sleepyzz
links:
- db_test:db
- name: TestingNet2
- name: Start a container with a command
docker_container:
name: sleepy
image: ubuntu:14.04
command: ["sleep", "infinity"]
- name: Add container to networks
docker_container:
name: sleepy
networks:
- name: TestingNet
ipv4_address: 172.1.1.18
links:
- sleeper
- name: TestingNet2
ipv4_address: 172.1.10.20
- name: Update network with aliases
docker_container:
name: sleepy
networks:
- name: TestingNet
aliases:
- sleepyz
- zzzz
- name: Remove container from one network
docker_container:
name: sleepy
networks:
- name: TestingNet2
purge_networks: yes
- name: Remove container from all networks
docker_container:
name: sleepy
purge_networks: yes
- name: Start a container and use an env file
docker_container:
name: agent
image: jenkinsci/ssh-slave
env_file: /var/tmp/jenkins/agent.env
- name: Create a container with limited capabilities
docker_container:
name: sleepy
image: ubuntu:16.04
command: sleep infinity
capabilities:
- sys_time
cap_drop:
- all
- name: Finer container restart/update control
docker_container:
name: test
image: ubuntu:18.04
env:
- arg1: "true"
- arg2: "whatever"
volumes:
- /tmp:/tmp
comparisons:
image: ignore # don't restart containers with older versions of the image
env: strict # we want precisely this environment
volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there
- name: Finer container restart/update control II
docker_container:
name: test
image: ubuntu:18.04
env:
- arg1: "true"
- arg2: "whatever"
comparisons:
'*': ignore # by default, ignore *all* options (including image)
env: strict # except for environment variables; there, we want to be strict
- name: Start container with healthstatus
docker_container:
name: nginx-proxy
image: nginx:1.13
state: started
healthcheck:
# Check if nginx server is healthy by curl'ing the server.
# If this fails or timeouts, the healthcheck fails.
test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
interval: 1m30s
timeout: 10s
retries: 3
start_period: 30s
- name: Remove healthcheck from container
docker_container:
name: nginx-proxy
image: nginx:1.13
state: started
healthcheck:
# The "NONE" check needs to be specified
test: ["NONE"]
- name: start container with block device read limit
docker_container:
name: test
image: ubuntu:18.04
state: started
device_read_bps:
# Limit read rate for /dev/sda to 20 mebibytes per second
- path: /dev/sda
rate: 20M
device_read_iops:
# Limit read rate for /dev/sdb to 300 IO per second
- path: /dev/sdb
rate: 300
'''
RETURN = '''
docker_container:
description:
- Before 2.3 this was 'ansible_docker_container' but was renamed due to conflicts with the connection plugin.
- Facts representing the current state of the container. Matches the docker inspection output.
- Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
are also accessible directly.
- Empty if C(state) is I(absent)
- If detached is I(False), will include Output attribute containing any output from container run.
returned: always
type: dict
sample: '{
"AppArmorProfile": "",
"Args": [],
"Config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/usr/bin/supervisord"
],
"Domainname": "",
"Entrypoint": null,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"443/tcp": {},
"80/tcp": {}
},
"Hostname": "8e47bf643eb9",
"Image": "lnmp_nginx:v1",
"Labels": {},
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/tmp/lnmp/nginx-sites/logs/": {}
},
...
}'
'''
import os
import re
import shlex
from distutils.version import LooseVersion
from ansible.module_utils.basic import human_to_bytes
from ansible.module_utils.docker.common import (
AnsibleDockerClient,
DifferenceTracker,
DockerBaseClass,
compare_generic,
is_image_name_id,
sanitize_result,
parse_healthcheck
)
from ansible.module_utils.six import string_types
try:
from docker import utils
from ansible.module_utils.docker.common import docker_version
if LooseVersion(docker_version) >= LooseVersion('1.10.0'):
from docker.types import Ulimit, LogConfig
else:
from docker.utils.types import Ulimit, LogConfig
from docker.errors import APIError, NotFound
except Exception:
# missing docker-py handled in ansible.module_utils.docker.common
pass
# Module options whose values are human-readable sizes (for example '512M' or
# '1G') and must be converted to an integer byte count with human_to_bytes()
# before being passed to the Docker API (done in TaskParameters.__init__).
REQUIRES_CONVERSION_TO_BYTES = [
    'kernel_memory',
    'memory',
    'memory_reservation',
    'memory_swap',
    'shm_size'
]
def is_volume_permissions(input):
    '''
    Return True if *input* is a comma-separated list of valid docker
    volume mode flags (access modes, SELinux labels, propagation modes),
    False as soon as any unknown flag is found.
    '''
    valid_modes = frozenset((
        'rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached',
        'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave',
    ))
    return all(mode in valid_modes for mode in input.split(','))
def parse_port_range(range_or_port, client):
    '''
    Parse a string holding a single port ('80') or an inclusive port
    range ('8000-8010').

    :param range_or_port: the port specification string.
    :param client: docker client used only to report a descending range.
    :return: list of integers, one per port in the range.
    '''
    # Single port: nothing to expand.
    if '-' not in range_or_port:
        return [int(range_or_port)]
    low, high = [int(piece) for piece in range_or_port.split('-')]
    if high < low:
        client.fail('Invalid port range: {0}'.format(range_or_port))
    return list(range(low, high + 1))
def split_colon_ipv6(input, client):
    '''
    Split string by ':', while keeping IPv6 addresses in square brackets in one component.

    :param input: string such as '[::1]:8080:80' or '1.2.3.4:80'.
    :param client: docker client, used to fail on an unterminated '['.
    :return: list of components, mirroring str.split(':') except that a
        bracketed IPv6 address never gets split internally.
    '''
    if '[' not in input:
        return input.split(':')
    start = 0
    result = []
    while start < len(input):
        i = input.find('[', start)
        if i < 0:
            # No more brackets: plain split for the remainder.
            result.extend(input[start:].split(':'))
            break
        j = input.find(']', i)
        if j < 0:
            client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(input, i + 1))
        # Everything before the bracket splits normally; the bracketed
        # address is then glued onto the last component.
        result.extend(input[start:i].split(':'))
        k = input.find(':', j)
        if k < 0:
            result[-1] += input[i:]
            start = len(input)
        else:
            result[-1] += input[i:k]
            # BUGFIX: this used to test `k == len(input)`, which can never be
            # true (str.find returns an index < len(input) or -1). As a result
            # a trailing ':' directly after a bracketed address dropped the
            # empty final component that plain str.split(':') produces
            # ('[::1]:' -> ['[::1]'] instead of ['[::1]', '']). Compare
            # against the last index instead.
            if k == len(input) - 1:
                result.append('')
                break
            start = k + 1
    return result
class TaskParameters(DockerBaseClass):
'''
Access and parse module parameters
'''
def __init__(self, client):
    """
    Read the module parameters off *client* and normalize them into the
    attribute names and value formats expected by docker-py.

    :param client: AnsibleDockerClient carrying module.params, the
        configured comparisons, and docker/docker-py version info.
    """
    super(TaskParameters, self).__init__()
    self.client = client

    # Declare every supported option up front so each attribute exists
    # (as None) even when the option was not supplied by the user.
    self.auto_remove = None
    self.blkio_weight = None
    self.capabilities = None
    self.cap_drop = None
    self.cleanup = None
    self.command = None
    self.cpu_period = None
    self.cpu_quota = None
    self.cpuset_cpus = None
    self.cpuset_mems = None
    self.cpu_shares = None
    self.detach = None
    self.debug = None
    self.devices = None
    self.device_read_bps = None
    self.device_write_bps = None
    self.device_read_iops = None
    self.device_write_iops = None
    self.dns_servers = None
    self.dns_opts = None
    self.dns_search_domains = None
    self.domainname = None
    self.env = None
    self.env_file = None
    self.entrypoint = None
    self.etc_hosts = None
    self.exposed_ports = None
    self.force_kill = None
    self.groups = None
    self.healthcheck = None
    self.hostname = None
    self.ignore_image = None
    self.image = None
    self.init = None
    self.interactive = None
    self.ipc_mode = None
    self.keep_volumes = None
    self.kernel_memory = None
    self.kill_signal = None
    self.labels = None
    self.links = None
    self.log_driver = None
    self.output_logs = None
    self.log_options = None
    self.mac_address = None
    self.memory = None
    self.memory_reservation = None
    self.memory_swap = None
    self.memory_swappiness = None
    self.name = None
    self.network_mode = None
    self.userns_mode = None
    self.networks = None
    self.oom_killer = None
    self.oom_score_adj = None
    self.paused = None
    self.pid_mode = None
    self.pids_limit = None
    self.privileged = None
    self.purge_networks = None
    self.pull = None
    self.read_only = None
    self.recreate = None
    self.restart = None
    self.restart_retries = None
    self.restart_policy = None
    self.runtime = None
    self.shm_size = None
    self.security_opts = None
    self.state = None
    self.stop_signal = None
    self.stop_timeout = None
    self.tmpfs = None
    self.trust_image_content = None
    self.tty = None
    self.user = None
    self.uts = None
    self.volumes = None
    self.volume_binds = dict()
    self.volumes_from = None
    self.volume_driver = None
    self.working_dir = None

    # Copy all supplied module parameters over the defaults above.
    for key, value in client.module.params.items():
        setattr(self, key, value)
    self.comparisons = client.comparisons

    # If state is 'absent', parameters do not have to be parsed or interpreted.
    # Only the container's name is needed.
    if self.state == 'absent':
        return

    if self.groups:
        # In case integers are passed as groups, we need to convert them to
        # strings as docker internally treats them as strings.
        self.groups = [str(g) for g in self.groups]

    # Convert human-readable sizes ('512M', '1G', ...) to byte counts.
    for param_name in REQUIRES_CONVERSION_TO_BYTES:
        if client.module.params.get(param_name):
            try:
                setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
            except ValueError as exc:
                self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))

    # 'all'/'ALL' as published ports means "publish every exposed port";
    # it is represented by the publish_all_ports flag instead of a mapping.
    self.publish_all_ports = False
    self.published_ports = self._parse_publish_ports()
    if self.published_ports in ('all', 'ALL'):
        self.publish_all_ports = True
        self.published_ports = None

    self.ports = self._parse_exposed_ports(self.published_ports)
    self.log("expose ports:")
    self.log(self.ports, pretty_print=True)

    self.links = self._parse_links(self.links)

    if self.volumes:
        # Expand '.'/'~' host paths to absolute paths.
        self.volumes = self._expand_host_paths()

    self.tmpfs = self._parse_tmpfs()
    self.env = self._get_environment()
    self.ulimits = self._parse_ulimits()
    self.sysctls = self._parse_sysctls()
    self.log_config = self._parse_log_config()
    try:
        self.healthcheck, self.disable_healthcheck = parse_healthcheck(self.healthcheck)
    except ValueError as e:
        self.fail(str(e))

    self.exp_links = None
    self.volume_binds = self._get_volume_binds(self.volumes)

    # IPC/PID/network modes may reference another container by name;
    # resolve such names to container IDs.
    self.pid_mode = self._replace_container_names(self.pid_mode)
    self.ipc_mode = self._replace_container_names(self.ipc_mode)
    self.network_mode = self._replace_container_names(self.network_mode)

    self.log("volumes:")
    self.log(self.volumes, pretty_print=True)
    self.log("volume binds:")
    self.log(self.volume_binds, pretty_print=True)

    if self.networks:
        # Resolve every requested network to its ID and normalize its links.
        for network in self.networks:
            network['id'] = self._get_network_id(network['name'])
            if not network['id']:
                self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
            if network.get('links'):
                network['links'] = self._parse_links(network['links'])

    if self.mac_address:
        # Ensure the MAC address uses colons instead of hyphens for later comparison
        self.mac_address = self.mac_address.replace('-', ':')

    if self.entrypoint:
        # convert from list to str.
        self.entrypoint = ' '.join([str(x) for x in self.entrypoint])

    if self.command:
        # convert from list to str
        if isinstance(self.command, list):
            self.command = ' '.join([str(x) for x in self.command])

    # Normalize device throttle options into the docker API dict format.
    for param_name in ["device_read_bps", "device_write_bps"]:
        if client.module.params.get(param_name):
            self._process_rate_bps(option=param_name)

    for param_name in ["device_read_iops", "device_write_iops"]:
        if client.module.params.get(param_name):
            self._process_rate_iops(option=param_name)
def fail(self, msg):
    # Delegate failure reporting to the Docker client, which fails the module.
    self.client.fail(msg)
@property
def update_parameters(self):
    '''
    Build the keyword arguments for updating an existing container.

    Only options that were actually supplied (not None) and that the
    installed docker/docker-py versions support are included.
    '''
    api_to_option = {
        'blkio_weight': 'blkio_weight',
        'cpu_period': 'cpu_period',
        'cpu_quota': 'cpu_quota',
        'cpu_shares': 'cpu_shares',
        'cpuset_cpus': 'cpuset_cpus',
        'cpuset_mems': 'cpuset_mems',
        'mem_limit': 'memory',
        'mem_reservation': 'memory_reservation',
        'memswap_limit': 'memory_swap',
        'kernel_memory': 'kernel_memory',
    }
    supported = self.client.option_minimal_versions
    return {
        api_key: getattr(self, option)
        for api_key, option in api_to_option.items()
        if getattr(self, option, None) is not None and supported[option]['supported']
    }
@property
def create_parameters(self):
    '''
    Build the keyword arguments for creating a new container.

    Includes the HostConfig object and the mount list, plus every
    supplied option supported by the installed docker/docker-py.
    '''
    api_to_option = {
        'command': 'command',
        'domainname': 'domainname',
        'hostname': 'hostname',
        'user': 'user',
        'detach': 'detach',
        'stdin_open': 'interactive',
        'tty': 'tty',
        'ports': 'ports',
        'environment': 'env',
        'name': 'name',
        'entrypoint': 'entrypoint',
        'mac_address': 'mac_address',
        'labels': 'labels',
        'stop_signal': 'stop_signal',
        'working_dir': 'working_dir',
        'stop_timeout': 'stop_timeout',
        'healthcheck': 'healthcheck',
    }
    if self.client.docker_py_version < LooseVersion('3.0'):
        # cpu_shares and volume_driver moved to create_host_config in > 3
        api_to_option['cpu_shares'] = 'cpu_shares'
        api_to_option['volume_driver'] = 'volume_driver'

    result = {
        'host_config': self._host_config(),
        'volumes': self._get_mounts(),
    }
    supported = self.client.option_minimal_versions
    for api_key, option in api_to_option.items():
        value = getattr(self, option, None)
        if value is not None and supported[option]['supported']:
            result[api_key] = value
    return result
def _expand_host_paths(self):
    '''
    Normalize volume specs: expand host paths starting with '.' or '~'
    to absolute paths, and default the mode of two-part host:container
    specs to 'rw'. Specs that need no rewriting pass through unchanged.
    '''
    expanded = []
    for vol in self.volumes:
        pieces = vol.split(':') if ':' in vol else [vol]
        if len(pieces) == 3:
            host, container, mode = pieces
            if not is_volume_permissions(mode):
                self.fail('Found invalid volumes mode: {0}'.format(mode))
            if re.match(r'[.~]', host):
                host = os.path.abspath(os.path.expanduser(host))
            expanded.append("%s:%s:%s" % (host, container, mode))
        elif len(pieces) == 2 and not is_volume_permissions(pieces[1]) and re.match(r'[.~]', pieces[0]):
            host = os.path.abspath(os.path.expanduser(pieces[0]))
            expanded.append("%s:%s:rw" % (host, pieces[1]))
        else:
            expanded.append(vol)
    return expanded
def _get_mounts(self):
    '''
    Return the list of container-side mount points extracted from the
    volumes option; specs without a recognizable container path are
    passed through as-is.
    :return: list of mount paths
    '''
    mounts = []
    if self.volumes:
        for vol in self.volumes:
            pieces = vol.split(':') if ':' in vol else [vol]
            if len(pieces) == 3:
                # host:container:mode -- the middle part is the mount point.
                mounts.append(pieces[1])
            elif len(pieces) == 2 and not is_volume_permissions(pieces[1]):
                # host:container (second part is a path, not a mode flag).
                mounts.append(pieces[1])
            else:
                mounts.append(vol)
    self.log("mounts:")
    self.log(mounts, pretty_print=True)
    return mounts
def _host_config(self):
    '''
    Returns parameters used to create a HostConfig object

    Maps docker-py create_host_config() keyword names to the module
    option attributes that feed them, filters out unset/unsupported
    options, and adds version-gated and compound options.
    '''
    # docker-py keyword -> module option attribute name.
    host_config_params = dict(
        port_bindings='published_ports',
        publish_all_ports='publish_all_ports',
        links='links',
        privileged='privileged',
        dns='dns_servers',
        dns_opt='dns_opts',
        dns_search='dns_search_domains',
        binds='volume_binds',
        volumes_from='volumes_from',
        network_mode='network_mode',
        userns_mode='userns_mode',
        cap_add='capabilities',
        cap_drop='cap_drop',
        extra_hosts='etc_hosts',
        read_only='read_only',
        ipc_mode='ipc_mode',
        security_opt='security_opts',
        ulimits='ulimits',
        sysctls='sysctls',
        log_config='log_config',
        mem_limit='memory',
        memswap_limit='memory_swap',
        mem_swappiness='memory_swappiness',
        oom_score_adj='oom_score_adj',
        oom_kill_disable='oom_killer',
        shm_size='shm_size',
        group_add='groups',
        devices='devices',
        pid_mode='pid_mode',
        tmpfs='tmpfs',
        init='init',
        uts_mode='uts',
        runtime='runtime',
        auto_remove='auto_remove',
        device_read_bps='device_read_bps',
        device_write_bps='device_write_bps',
        device_read_iops='device_read_iops',
        device_write_iops='device_write_iops',
        pids_limit='pids_limit',
    )

    if self.client.docker_py_version >= LooseVersion('1.9') and self.client.docker_api_version >= LooseVersion('1.22'):
        # blkio_weight can always be updated, but can only be set on creation
        # when docker-py and docker API are new enough
        host_config_params['blkio_weight'] = 'blkio_weight'

    if self.client.docker_py_version >= LooseVersion('3.0'):
        # cpu_shares and volume_driver moved to create_host_config in > 3
        host_config_params['cpu_shares'] = 'cpu_shares'
        host_config_params['volume_driver'] = 'volume_driver'

    params = dict()
    for key, value in host_config_params.items():
        # Only pass options that were supplied and that the installed
        # docker/docker-py versions support.
        if getattr(self, value, None) is not None:
            if self.client.option_minimal_versions[value]['supported']:
                params[key] = getattr(self, value)

    if self.restart_policy:
        # restart_retries only makes sense together with restart_policy.
        params['restart_policy'] = dict(Name=self.restart_policy,
                                        MaximumRetryCount=self.restart_retries)

    return self.client.create_host_config(**params)
@property
def default_host_ip(self):
    '''
    Determine the host IP to bind published ports to: the
    host_binding_ipv4 option of the first requested bridge network
    that defines one, otherwise 0.0.0.0.
    '''
    for net in self.networks or []:
        if not net.get('name'):
            continue
        network = self.client.inspect_network(net['name'])
        if network.get('Driver') == 'bridge' and \
                network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
            return network['Options']['com.docker.network.bridge.host_binding_ipv4']
    return '0.0.0.0'
def _parse_publish_ports(self):
    '''
    Parse ports from docker CLI syntax

    Returns 'all' (publish everything), None (nothing requested), or a
    dict mapping 'container_port[/protocol]' to a bind tuple or a list
    of bind tuples.
    '''
    if self.published_ports is None:
        return None

    if 'all' in self.published_ports:
        return 'all'

    default_ip = self.default_host_ip

    binds = {}
    for port in self.published_ports:
        # Spec shape: [ip:[host_port:]]container_port[/protocol].
        # split_colon_ipv6 keeps a bracketed IPv6 bind address in one piece.
        parts = split_colon_ipv6(str(port), self.client)
        container_port = parts[-1]
        protocol = ''
        if '/' in container_port:
            container_port, protocol = parts[-1].split('/')
        container_ports = parse_port_range(container_port, self.client)

        p_len = len(parts)
        if p_len == 1:
            # Container port only: bind default IP, ephemeral host port.
            port_binds = len(container_ports) * [(default_ip,)]
        elif p_len == 2:
            # host_port(:range):container_port
            port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)]
        elif p_len == 3:
            # ip:host_port:container_port
            # We only allow IPv4 and IPv6 addresses for the bind address
            if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+\]$', parts[0]):
                self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. '
                           'Use the dig lookup to resolve hostnames. (Found hostname: {0})').format(parts[0]))
            if parts[1]:
                port_binds = [(parts[0], port) for port in parse_port_range(parts[1], self.client)]
            else:
                # 'ip::container_port' -- bind address only, ephemeral host port.
                port_binds = len(container_ports) * [(parts[0],)]

        for bind, container_port in zip(port_binds, container_ports):
            idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port
            if idx in binds:
                # Same container port published several times: collect the
                # binds into a list.
                old_bind = binds[idx]
                if isinstance(old_bind, list):
                    old_bind.append(bind)
                else:
                    binds[idx] = [old_bind, bind]
            else:
                binds[idx] = bind
    return binds
def _get_volume_binds(self, volumes):
    '''
    Extract host bindings, if any, from list of volume mapping strings.

    :return: dictionary of bind mappings (host path -> {bind, mode})
    '''
    result = dict()
    for vol in volumes or []:
        if ':' not in vol:
            continue
        pieces = vol.split(':')
        if len(pieces) == 3:
            host, container, mode = pieces
            if not is_volume_permissions(mode):
                self.fail('Found invalid volumes mode: {0}'.format(mode))
        elif len(pieces) == 2 and not is_volume_permissions(pieces[1]):
            # host:container without a mode flag defaults to read-write.
            host, container, mode = pieces + ['rw']
        else:
            # Anonymous volume or unparseable spec: no host binding.
            continue
        result[host] = dict(
            bind=container,
            mode=mode
        )
    return result
def _parse_exposed_ports(self, published_ports):
    '''
    Parse exposed ports from docker CLI-style ports syntax.

    Returns a list of (port, protocol) tuples built from the
    exposed_ports option, adding any published port that is not already
    covered by an exposed port or port range.
    '''
    exposed = []
    if self.exposed_ports:
        for port in self.exposed_ports:
            port = str(port).strip()
            protocol = 'tcp'
            # Strip an optional '/protocol' suffix.
            match = re.search(r'(/.+$)', port)
            if match:
                protocol = match.group(1).replace('/', '')
                port = re.sub(r'/.+$', '', port)
            exposed.append((port, protocol))
    if published_ports:
        # Any published port should also be exposed
        for publish_port in published_ports:
            match = False
            if isinstance(publish_port, string_types) and '/' in publish_port:
                port, protocol = publish_port.split('/')
                port = int(port)
            else:
                protocol = 'tcp'
                port = int(publish_port)
            for exposed_port in exposed:
                if exposed_port[1] != protocol:
                    continue
                if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
                    # Exposed range 'a-b': check containment numerically.
                    start_port, end_port = exposed_port[0].split('-')
                    if int(start_port) <= port <= int(end_port):
                        match = True
                # NOTE(review): entries built from exposed_ports above keep the
                # port as a string while `port` here is an int, so this equality
                # can only match tuples appended by this loop -- confirm whether
                # a string/int comparison was intended.
                elif exposed_port[0] == port:
                    match = True
            if not match:
                exposed.append((port, protocol))
    return exposed
@staticmethod
def _parse_links(links):
    '''
    Turn a list of 'container[:alias]' strings into a list of
    (container, alias) tuples; the alias defaults to the container name.
    '''
    if links is None:
        return None
    result = []
    for link in links:
        name, sep, alias = link.partition(':')
        result.append((name, alias if sep else name))
    return result
def _parse_ulimits(self):
    '''
    Turn 'name:soft[:hard]' ulimit strings into docker-py Ulimit
    objects; hard defaults to the soft limit when omitted.
    '''
    if self.ulimits is None:
        return None

    parsed = []
    for spec in self.ulimits:
        opts = dict()
        pieces = spec.split(':')
        if len(pieces) >= 2:
            opts['name'] = pieces[0]
            opts['soft'] = int(pieces[1])
            opts['hard'] = int(pieces[2]) if len(pieces) == 3 else int(pieces[1])
        try:
            parsed.append(Ulimit(**opts))
        except ValueError as exc:
            self.fail("Error parsing ulimits value %s - %s" % (spec, exc))
    return parsed
def _parse_sysctls(self):
    '''
    Return the sysctls option unchanged: docker-py accepts a plain dict
    of sysctl key/value pairs, so no conversion is needed.
    '''
    return self.sysctls
def _parse_log_config(self):
    '''
    Create a docker-py LogConfig object from the log_driver and
    log_options options; None when no log driver was requested.
    '''
    if self.log_driver is None:
        return None

    config = dict()
    if self.log_options is not None:
        # The docker API expects all option values as strings.
        for key, value in self.log_options.items():
            config[key] = str(value)

    try:
        return LogConfig(Type=self.log_driver, Config=config)
    except ValueError as exc:
        self.fail('Error parsing logging options - %s' % (exc))
def _parse_tmpfs(self):
    '''
    Turn the tmpfs option into a dict mapping each mount point to its
    option string (empty when no options were given).
    '''
    parsed = dict()
    if self.tmpfs is None:
        return parsed
    for spec in self.tmpfs:
        path, dummy, options = spec.partition(":")
        parsed[path] = options
    return parsed
def _get_environment(self):
    """
    Build the final environment dict. If environment file is combined with
    explicit environment variables, the explicit environment variables
    take precedence.
    """
    env = {}
    if self.env_file:
        parsed = utils.parse_env_file(self.env_file)
        env.update((key, str(value)) for key, value in parsed.items())
    if self.env:
        for key, value in self.env.items():
            if not isinstance(value, string_types):
                self.fail("Non-string value found for env option. "
                          "Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: %s" % (key, ))
            env[key] = str(value)
    return env
def _get_network_id(self, network_name):
    '''
    Look up the ID of the Docker network with exactly the given name;
    returns None when no such network exists.
    '''
    try:
        candidates = (network['Id']
                      for network in self.client.networks(names=[network_name])
                      if network['Name'] == network_name)
        return next(candidates, None)
    except Exception as exc:
        self.fail("Error getting network id for %s - %s" % (network_name, str(exc)))
def _process_rate_bps(self, option):
    """
    Format device_read_bps and device_write_bps option: title-case the
    keys for the docker API and convert the rate to a byte count.
    """
    def _normalize(entry):
        # {'path': ..., 'rate': '20M'} -> {'Path': ..., 'Rate': 20971520}
        item = dict((key.title(), value) for key, value in entry.items())
        item['Rate'] = human_to_bytes(item['Rate'])
        return item

    setattr(self, option, [_normalize(entry) for entry in getattr(self, option)])
def _process_rate_iops(self, option):
    """
    Format device_read_iops and device_write_iops option: title-case
    the keys for the docker API ({'path': ...} -> {'Path': ...}).
    """
    setattr(self, option, [
        dict((key.title(), value) for key, value in entry.items())
        for entry in getattr(self, option)
    ])
def _replace_container_names(self, mode):
    """
    Parse IPC/PID/network modes of the form 'container:<name-or-id>'
    and resolve the reference to 'container:<id>'. Any other mode is
    returned unchanged; an unresolvable reference only warns.
    """
    prefix = 'container:'
    if mode is None or not mode.startswith(prefix):
        return mode
    container_name = mode[len(prefix):]
    # Inspect to find out whether this is an ID or a name, and fetch the ID.
    container = self.client.get_container(container_name)
    if container is None:
        # Keep what the user specified; just warn that it did not resolve.
        self.client.module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
        return mode
    return 'container:{0}'.format(container['Id'])
class Container(DockerBaseClass):
def __init__(self, container, parameters):
    """
    Wrap the inspect result of a container (or None when it does not
    exist) together with the requested TaskParameters, for computing
    configuration differences.

    :param container: inspection dict from the Docker daemon, or None.
    :param parameters: TaskParameters with the requested configuration.
    """
    super(Container, self).__init__()
    self.raw = container
    self.Id = None
    self.container = container
    if container:
        self.Id = container['Id']
        self.Image = container['Image']
    self.log(self.container, pretty_print=True)
    self.parameters = parameters
    # 'expected_*' attributes hold the requested configuration normalized
    # into the shape the daemon reports; they are populated later by
    # has_different_configuration().
    self.parameters.expected_links = None
    self.parameters.expected_ports = None
    self.parameters.expected_exposed = None
    self.parameters.expected_volumes = None
    self.parameters.expected_ulimits = None
    self.parameters.expected_sysctls = None
    self.parameters.expected_etc_hosts = None
    self.parameters.expected_env = None
    # Maps each normalized 'expected_*' key to the module option whose
    # comparison rule (client.comparisons) applies to it.
    self.parameters_map = dict()
    self.parameters_map['expected_links'] = 'links'
    # NOTE(review): unlike its siblings this maps to itself rather than to a
    # module option name -- presumably 'expected_ports' has its own entry in
    # client.comparisons; verify against AnsibleDockerClient.
    self.parameters_map['expected_ports'] = 'expected_ports'
    self.parameters_map['expected_exposed'] = 'exposed_ports'
    self.parameters_map['expected_volumes'] = 'volumes'
    self.parameters_map['expected_ulimits'] = 'ulimits'
    self.parameters_map['expected_sysctls'] = 'sysctls'
    self.parameters_map['expected_etc_hosts'] = 'etc_hosts'
    self.parameters_map['expected_env'] = 'env'
    self.parameters_map['expected_entrypoint'] = 'entrypoint'
    self.parameters_map['expected_binds'] = 'volumes'
    self.parameters_map['expected_cmd'] = 'command'
    self.parameters_map['expected_devices'] = 'devices'
    self.parameters_map['expected_healthcheck'] = 'healthcheck'
def fail(self, msg):
    # Delegate failure reporting to the module's Docker client.
    self.parameters.client.fail(msg)
@property
def exists(self):
    """Whether inspection returned a container, i.e. the container exists."""
    # Idiom fix: `True if x else False` is simply `bool(x)`.
    return bool(self.container)
@property
def running(self):
    """True when the container exists, is running, and is not a 'Ghost'."""
    state = (self.container or {}).get('State')
    if not state:
        return False
    return bool(state.get('Running') and not state.get('Ghost', False))
@property
def paused(self):
    """True when the container exists and its State reports Paused."""
    state = (self.container or {}).get('State')
    if not state:
        return False
    return state.get('Paused', False)
def _compare(self, a, b, compare):
    '''
    Compare values a and b as described in compare.

    *compare* is an entry of client.comparisons: a dict with a
    'comparison' mode and a 'type' key; the actual work is delegated
    to compare_generic().
    '''
    return compare_generic(a, b, compare['comparison'], compare['type'])
def has_different_configuration(self, image):
    '''
    Diff parameters vs existing container config. Returns tuple: (True | False, List of differences)

    Normalizes the requested options into the daemon's inspect format,
    maps each one onto the corresponding inspect field, and records
    every mismatch (per the configured comparison rules) in a
    DifferenceTracker.
    '''
    self.log('Starting has_different_configuration')
    # Normalize requested options into the shape the daemon reports.
    self.parameters.expected_entrypoint = self._get_expected_entrypoint()
    self.parameters.expected_links = self._get_expected_links()
    self.parameters.expected_ports = self._get_expected_ports()
    self.parameters.expected_exposed = self._get_expected_exposed(image)
    self.parameters.expected_volumes = self._get_expected_volumes(image)
    self.parameters.expected_binds = self._get_expected_binds(image)
    self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
    self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls)
    self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
    self.parameters.expected_env = self._get_expected_env(image)
    self.parameters.expected_cmd = self._get_expected_cmd()
    self.parameters.expected_devices = self._get_expected_devices()
    self.parameters.expected_healthcheck = self._get_expected_healthcheck()

    if not self.container.get('HostConfig'):
        self.fail("has_config_diff: Error parsing container properties. HostConfig missing.")
    if not self.container.get('Config'):
        self.fail("has_config_diff: Error parsing container properties. Config missing.")
    if not self.container.get('NetworkSettings'):
        self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.")

    host_config = self.container['HostConfig']
    log_config = host_config.get('LogConfig', dict())
    restart_policy = host_config.get('RestartPolicy', dict())
    config = self.container['Config']
    network = self.container['NetworkSettings']

    # The previous version of the docker module ignored the detach state by
    # assuming if the container was running, it must have been detached.
    detach = not (config.get('AttachStderr') and config.get('AttachStdout'))

    # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
    if config.get('ExposedPorts') is not None:
        expected_exposed = [self._normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()]
    else:
        expected_exposed = []

    # Map parameters to container inspect results
    config_mapping = dict(
        expected_cmd=config.get('Cmd'),
        domainname=config.get('Domainname'),
        hostname=config.get('Hostname'),
        user=config.get('User'),
        detach=detach,
        init=host_config.get('Init'),
        interactive=config.get('OpenStdin'),
        capabilities=host_config.get('CapAdd'),
        cap_drop=host_config.get('CapDrop'),
        expected_devices=host_config.get('Devices'),
        dns_servers=host_config.get('Dns'),
        dns_opts=host_config.get('DnsOptions'),
        dns_search_domains=host_config.get('DnsSearch'),
        expected_env=(config.get('Env') or []),
        expected_entrypoint=config.get('Entrypoint'),
        expected_etc_hosts=host_config['ExtraHosts'],
        expected_exposed=expected_exposed,
        groups=host_config.get('GroupAdd'),
        ipc_mode=host_config.get("IpcMode"),
        labels=config.get('Labels'),
        expected_links=host_config.get('Links'),
        mac_address=network.get('MacAddress'),
        memory_swappiness=host_config.get('MemorySwappiness'),
        network_mode=host_config.get('NetworkMode'),
        userns_mode=host_config.get('UsernsMode'),
        oom_killer=host_config.get('OomKillDisable'),
        oom_score_adj=host_config.get('OomScoreAdj'),
        pid_mode=host_config.get('PidMode'),
        privileged=host_config.get('Privileged'),
        expected_ports=host_config.get('PortBindings'),
        read_only=host_config.get('ReadonlyRootfs'),
        restart_policy=restart_policy.get('Name'),
        runtime=host_config.get('Runtime'),
        shm_size=host_config.get('ShmSize'),
        security_opts=host_config.get("SecurityOpt"),
        stop_signal=config.get("StopSignal"),
        tmpfs=host_config.get('Tmpfs'),
        tty=config.get('Tty'),
        expected_ulimits=host_config.get('Ulimits'),
        expected_sysctls=host_config.get('Sysctls'),
        uts=host_config.get('UTSMode'),
        expected_volumes=config.get('Volumes'),
        expected_binds=host_config.get('Binds'),
        volume_driver=host_config.get('VolumeDriver'),
        volumes_from=host_config.get('VolumesFrom'),
        working_dir=config.get('WorkingDir'),
        publish_all_ports=host_config.get('PublishAllPorts'),
        expected_healthcheck=config.get('Healthcheck'),
        disable_healthcheck=(not config.get('Healthcheck') or config.get('Healthcheck').get('Test') == ['NONE']),
        device_read_bps=host_config.get('BlkioDeviceReadBps'),
        device_write_bps=host_config.get('BlkioDeviceWriteBps'),
        device_read_iops=host_config.get('BlkioDeviceReadIOps'),
        device_write_iops=host_config.get('BlkioDeviceWriteIOps'),
        pids_limit=host_config.get('PidsLimit'),
    )
    # Options which don't make sense without their accompanying option
    if self.parameters.restart_policy:
        config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
    if self.parameters.log_driver:
        config_mapping['log_driver'] = log_config.get('Type')
        config_mapping['log_options'] = log_config.get('Config')

    if self.parameters.client.option_minimal_versions['auto_remove']['supported']:
        # auto_remove is only supported in docker>=2; unfortunately it has a default
        # value, that's why we have to jump through the hoops here
        config_mapping['auto_remove'] = host_config.get('AutoRemove')

    if self.parameters.client.option_minimal_versions['stop_timeout']['supported']:
        # stop_timeout is only supported in docker>=2.1. Note that stop_timeout
        # has a hybrid role, in that it used to be something only used for stopping
        # containers, and is now also used as a container property. That's why
        # it needs special handling here.
        config_mapping['stop_timeout'] = config.get('StopTimeout')

    if self.parameters.client.docker_api_version < LooseVersion('1.22'):
        # For docker API < 1.22, update_container() is not supported. Thus
        # we need to handle all limits which are usually handled by
        # update_container() as configuration changes which require a container
        # restart.
        config_mapping.update(dict(
            blkio_weight=host_config.get('BlkioWeight'),
            cpu_period=host_config.get('CpuPeriod'),
            cpu_quota=host_config.get('CpuQuota'),
            cpu_shares=host_config.get('CpuShares'),
            cpuset_cpus=host_config.get('CpusetCpus'),
            cpuset_mems=host_config.get('CpusetMems'),
            kernel_memory=host_config.get("KernelMemory"),
            memory=host_config.get('Memory'),
            memory_reservation=host_config.get('MemoryReservation'),
            memory_swap=host_config.get('MemorySwap'),
        ))

    differences = DifferenceTracker()
    for key, value in config_mapping.items():
        # Skip options the installed docker/docker-py cannot express.
        minimal_version = self.parameters.client.option_minimal_versions.get(key, {})
        if not minimal_version.get('supported', True):
            continue
        compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
        self.log('check differences %s %s vs %s (%s)' % (key, getattr(self.parameters, key), str(value), compare))
        if getattr(self.parameters, key, None) is not None:
            match = self._compare(getattr(self.parameters, key), value, compare)
            if not match:
                # no match. record the differences
                p = getattr(self.parameters, key)
                c = value
                if compare['type'] == 'set':
                    # Since the order does not matter, sort so that the diff output is better.
                    if p is not None:
                        p = sorted(p)
                    if c is not None:
                        c = sorted(c)
                elif compare['type'] == 'set(dict)':
                    # Since the order does not matter, sort so that the diff output is better.
                    # We sort the list of dictionaries by using the sorted items of a dict as its key.
                    if p is not None:
                        p = sorted(p, key=lambda x: sorted(x.items()))
                    if c is not None:
                        c = sorted(c, key=lambda x: sorted(x.items()))
                differences.add(key, parameter=p, active=c)

    has_differences = not differences.empty
    return has_differences, differences
def has_different_resource_limits(self):
    """
    Diff parameters against the container's current resource limits.

    Returns a (bool, DifferenceTracker) pair; the bool is True when at
    least one requested limit differs from the active one.
    """
    if not self.container.get('HostConfig'):
        self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")
    if self.parameters.client.docker_api_version < LooseVersion('1.22'):
        # update_container() call not supported
        return False, []

    host_config = self.container['HostConfig']
    # (module parameter name, docker inspect HostConfig key) pairs.
    limit_keys = (
        ('blkio_weight', 'BlkioWeight'),
        ('cpu_period', 'CpuPeriod'),
        ('cpu_quota', 'CpuQuota'),
        ('cpu_shares', 'CpuShares'),
        ('cpuset_cpus', 'CpusetCpus'),
        ('cpuset_mems', 'CpusetMems'),
        ('kernel_memory', 'KernelMemory'),
        ('memory', 'Memory'),
        ('memory_reservation', 'MemoryReservation'),
        ('memory_swap', 'MemorySwap'),
    )
    tracker = DifferenceTracker()
    for param_name, api_name in limit_keys:
        requested = getattr(self.parameters, param_name, None)
        if not requested:
            # Only truthy requested values are compared (unset/zero are skipped),
            # matching the original truthiness check.
            continue
        active = host_config.get(api_name)
        comparison = self.parameters.client.comparisons[self.parameters_map.get(param_name, param_name)]
        if not self._compare(requested, active, comparison):
            # No match: record the difference.
            tracker.add(param_name, parameter=requested, active=active)
    return not tracker.empty, tracker
def has_network_differences(self):
    """
    Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
    """
    different = False
    differences = []

    if not self.parameters.networks:
        return different, differences

    if not self.container.get('NetworkSettings'):
        self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")

    connected_networks = self.container['NetworkSettings']['Networks']
    for network in self.parameters.networks:
        network_info = connected_networks.get(network['name'], None)
        if network_info is None:
            # Not connected to this network at all.
            different = True
            differences.append(dict(
                parameter=network,
                container=None
            ))
            continue

        diff = False
        if network.get('ipv4_address') and network['ipv4_address'] != network_info.get('IPAddress'):
            diff = True
        if network.get('ipv6_address') and network['ipv6_address'] != network_info.get('GlobalIPv6Address'):
            diff = True
        if network.get('aliases'):
            if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'):
                diff = True
        if network.get('links'):
            # Expected link entries use the "target:alias" form.
            expected_links = ["%s:%s" % (link, alias) for link, alias in network['links']]
            if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'):
                diff = True
        if diff:
            different = True
            differences.append(dict(
                parameter=network,
                container=dict(
                    name=network['name'],
                    ipv4_address=network_info.get('IPAddress'),
                    ipv6_address=network_info.get('GlobalIPv6Address'),
                    aliases=network_info.get('Aliases'),
                    links=network_info.get('Links')
                )
            ))
    return different, differences
def has_extra_networks(self):
    """
    Check if the container is connected to non-requested networks
    """
    extra_networks = []
    extra = False

    if not self.container.get('NetworkSettings'):
        self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")

    connected_networks = self.container['NetworkSettings'].get('Networks')
    if connected_networks:
        # Names the user explicitly asked for; anything else is "extra".
        requested_names = [net['name'] for net in self.parameters.networks] if self.parameters.networks else []
        for network, network_config in connected_networks.items():
            if network not in requested_names:
                extra = True
                extra_networks.append(dict(name=network, id=network_config['NetworkID']))
    return extra, extra_networks
def _get_expected_devices(self):
if not self.parameters.devices:
return None
expected_devices = []
for device in self.parameters.devices:
parts = device.split(':')
if len(parts) == 1:
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[0],
PathOnHost=parts[0]
))
elif len(parts) == 2:
parts = device.split(':')
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[1],
PathOnHost=parts[0]
)
)
else:
expected_devices.append(
dict(
CgroupPermissions=parts[2],
PathInContainer=parts[1],
PathOnHost=parts[0]
))
return expected_devices
def _get_expected_entrypoint(self):
if not self.parameters.entrypoint:
return None
return shlex.split(self.parameters.entrypoint)
def _get_expected_ports(self):
if not self.parameters.published_ports:
return None
expected_bound_ports = {}
for container_port, config in self.parameters.published_ports.items():
if isinstance(container_port, int):
container_port = "%s/tcp" % container_port
if len(config) == 1:
if isinstance(config[0], int):
expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}]
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}]
elif isinstance(config[0], tuple):
expected_bound_ports[container_port] = []
for host_ip, host_port in config:
expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': str(host_port)})
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]
return expected_bound_ports
def _get_expected_links(self):
if self.parameters.links is None:
return None
self.log('parameter links:')
self.log(self.parameters.links, pretty_print=True)
exp_links = []
for link, alias in self.parameters.links:
exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
return exp_links
def _get_expected_binds(self, image):
    """Combine image-declared binds with requested host-mounted volumes.

    Only volumes that specify a host path contribute to the result; the
    returned list is deduplicated via set().
    """
    self.log('_get_expected_binds')
    image_vols = []
    if image:
        image_vols = self._get_image_binds(image['ContainerConfig'].get('Volumes'))
    param_vols = []
    if self.parameters.volumes:
        for vol in self.parameters.volumes:
            host = None
            if ':' in vol:
                parts = vol.split(':')
                if len(parts) == 3:
                    host, container, mode = parts
                    if not is_volume_permissions(mode):
                        self.fail('Found invalid volumes mode: {0}'.format(mode))
                if len(parts) == 2:
                    # "host:container" only counts as a bind when the second
                    # part is not itself a permissions mode.
                    if not is_volume_permissions(parts[1]):
                        host, container, mode = parts + ['rw']
            if host:
                param_vols.append("%s:%s:%s" % (host, container, mode))
    result = list(set(image_vols + param_vols))
    self.log("expected_binds:")
    self.log(result, pretty_print=True)
    return result
def _get_image_binds(self, volumes):
'''
Convert array of binds to array of strings with format host_path:container_path:mode
:param volumes: array of bind dicts
:return: array of strings
'''
results = []
if isinstance(volumes, dict):
results += self._get_bind_from_dict(volumes)
elif isinstance(volumes, list):
for vol in volumes:
results += self._get_bind_from_dict(vol)
return results
@staticmethod
def _get_bind_from_dict(volume_dict):
results = []
if volume_dict:
for host_path, config in volume_dict.items():
if isinstance(config, dict) and config.get('bind'):
container_path = config.get('bind')
mode = config.get('mode', 'rw')
results.append("%s:%s:%s" % (host_path, container_path, mode))
return results
def _get_expected_volumes(self, image):
self.log('_get_expected_volumes')
expected_vols = dict()
if image and image['ContainerConfig'].get('Volumes'):
expected_vols.update(image['ContainerConfig'].get('Volumes'))
if self.parameters.volumes:
for vol in self.parameters.volumes:
container = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if not is_volume_permissions(mode):
self.fail('Found invalid volumes mode: {0}'.format(mode))
if len(vol.split(':')) == 2:
parts = vol.split(':')
if not is_volume_permissions(parts[1]):
host, container, mode = vol.split(':') + ['rw']
new_vol = dict()
if container:
new_vol[container] = dict()
else:
new_vol[vol] = dict()
expected_vols.update(new_vol)
if not expected_vols:
expected_vols = None
self.log("expected_volumes:")
self.log(expected_vols, pretty_print=True)
return expected_vols
def _get_expected_env(self, image):
self.log('_get_expected_env')
expected_env = dict()
if image and image['ContainerConfig'].get('Env'):
for env_var in image['ContainerConfig']['Env']:
parts = env_var.split('=', 1)
expected_env[parts[0]] = parts[1]
if self.parameters.env:
expected_env.update(self.parameters.env)
param_env = []
for key, value in expected_env.items():
param_env.append("%s=%s" % (key, value))
return param_env
def _get_expected_exposed(self, image):
self.log('_get_expected_exposed')
image_ports = []
if image:
image_ports = [self._normalize_port(p) for p in (image['ContainerConfig'].get('ExposedPorts') or {}).keys()]
param_ports = []
if self.parameters.ports:
param_ports = [str(p[0]) + '/' + p[1] for p in self.parameters.ports]
result = list(set(image_ports + param_ports))
self.log(result, pretty_print=True)
return result
def _get_expected_ulimits(self, config_ulimits):
self.log('_get_expected_ulimits')
if config_ulimits is None:
return None
results = []
for limit in config_ulimits:
results.append(dict(
Name=limit.name,
Soft=limit.soft,
Hard=limit.hard
))
return results
def _get_expected_sysctls(self, config_sysctls):
self.log('_get_expected_sysctls')
if config_sysctls is None:
return None
result = dict()
for key, value in config_sysctls.items():
result[key] = str(value)
return result
def _get_expected_cmd(self):
self.log('_get_expected_cmd')
if not self.parameters.command:
return None
return shlex.split(self.parameters.command)
def _convert_simple_dict_to_list(self, param_name, join_with=':'):
if getattr(self.parameters, param_name, None) is None:
return None
results = []
for key, value in getattr(self.parameters, param_name).items():
results.append("%s%s%s" % (key, join_with, value))
return results
def _normalize_port(self, port):
if '/' not in port:
return port + '/tcp'
return port
def _get_expected_healthcheck(self):
self.log('_get_expected_healthcheck')
expected_healthcheck = dict()
if self.parameters.healthcheck:
expected_healthcheck.update([(k.title().replace("_", ""), v)
for k, v in self.parameters.healthcheck.items()])
return expected_healthcheck
class ContainerManager(DockerBaseClass):
    '''
    Perform container management tasks
    '''

    def __init__(self, client):
        # Runs the whole task: validates option combinations, builds
        # TaskParameters, dispatches on `state`, and assembles results/diff.
        super(ContainerManager, self).__init__()

        # Warn about option combinations where one option is silently ignored.
        if client.module.params.get('log_options') and not client.module.params.get('log_driver'):
            client.module.warn('log_options is ignored when log_driver is not specified')
        if client.module.params.get('healthcheck') and not client.module.params.get('healthcheck').get('test'):
            client.module.warn('healthcheck is ignored when test is not specified')
        if client.module.params.get('restart_retries') is not None and not client.module.params.get('restart_policy'):
            client.module.warn('restart_retries is ignored when restart_policy is not specified')

        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        self.results = {'changed': False, 'actions': []}
        self.diff = {}
        self.diff_tracker = DifferenceTracker()
        self.facts = {}

        state = self.parameters.state
        if state in ('stopped', 'started', 'present'):
            self.present(state)
        elif state == 'absent':
            self.absent()

        # 'actions' is debugging output; only keep it in check mode or debug mode.
        if not self.check_mode and not self.parameters.debug:
            self.results.pop('actions')

        if self.client.module._diff or self.parameters.debug:
            self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after()
            self.results['diff'] = self.diff

        if self.facts:
            self.results['ansible_facts'] = {'docker_container': self.facts}
            self.results['docker_container'] = self.facts

    def present(self, state):
        # Ensure the container exists; create or recreate it when needed,
        # then reconcile limits, networks, running and paused state.
        container = self._get_container(self.parameters.name)
        was_running = container.running
        was_paused = container.paused

        # If the image parameter was passed then we need to deal with the image
        # version comparison. Otherwise we handle this depending on whether
        # the container already runs or not; in the former case, in case the
        # container needs to be restarted, we use the existing container's
        # image ID.
        image = self._get_image()
        self.log(image, pretty_print=True)
        if not container.exists:
            # New container
            self.log('No container found')
            if not self.parameters.image:
                self.fail('Cannot create container when image is not specified!')
            self.diff_tracker.add('exists', parameter=True, active=False)
            new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
            if new_container:
                container = new_container
        else:
            # Existing container
            different, differences = container.has_different_configuration(image)
            image_different = False
            if self.parameters.comparisons['image']['comparison'] == 'strict':
                image_different = self._image_is_different(image, container)
            if image_different or different or self.parameters.recreate:
                # Configuration changed: the container must be recreated.
                self.diff_tracker.merge(differences)
                self.diff['differences'] = differences.get_legacy_docker_container_diffs()
                if image_different:
                    self.diff['image_different'] = True
                self.log("differences")
                self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True)
                image_to_use = self.parameters.image
                if not image_to_use and container and container.Image:
                    # Fall back to the existing container's image ID.
                    image_to_use = container.Image
                if not image_to_use:
                    self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!')
                if container.running:
                    self.container_stop(container.Id)
                self.container_remove(container.Id)
                new_container = self.container_create(image_to_use, self.parameters.create_parameters)
                if new_container:
                    container = new_container

        if container and container.exists:
            container = self.update_limits(container)
            container = self.update_networks(container)

            if state == 'started' and not container.running:
                self.diff_tracker.add('running', parameter=True, active=was_running)
                container = self.container_start(container.Id)
            elif state == 'started' and self.parameters.restart:
                # Explicit restart requested: stop then start again.
                self.diff_tracker.add('running', parameter=True, active=was_running)
                self.container_stop(container.Id)
                container = self.container_start(container.Id)
            elif state == 'stopped' and container.running:
                self.diff_tracker.add('running', parameter=False, active=was_running)
                self.container_stop(container.Id)
                container = self._get_container(container.Id)

            if state == 'started' and container.paused != self.parameters.paused:
                # Reconcile the paused flag via pause/unpause.
                self.diff_tracker.add('paused', parameter=self.parameters.paused, active=was_paused)
                if not self.check_mode:
                    try:
                        if self.parameters.paused:
                            self.client.pause(container=container.Id)
                        else:
                            self.client.unpause(container=container.Id)
                    except Exception as exc:
                        self.fail("Error %s container %s: %s" % (
                            "pausing" if self.parameters.paused else "unpausing", container.Id, str(exc)
                        ))
                    container = self._get_container(container.Id)
                self.results['changed'] = True
                self.results['actions'].append(dict(set_paused=self.parameters.paused))

        self.facts = container.raw

    def absent(self):
        # Stop (if running) and remove the container when it exists.
        container = self._get_container(self.parameters.name)
        if container.exists:
            if container.running:
                self.diff_tracker.add('running', parameter=False, active=True)
                self.container_stop(container.Id)
            self.diff_tracker.add('exists', parameter=False, active=True)
            self.container_remove(container.Id)

    def fail(self, msg, **kwargs):
        # Delegate failure handling to the client (exits the module).
        self.client.fail(msg, **kwargs)

    def _output_logs(self, msg):
        # Emit container output through Ansible's logging facility.
        self.client.module.log(msg=msg)

    def _get_container(self, container):
        '''
        Expects container ID or Name. Returns a container object
        '''
        return Container(self.client.get_container(container), self.parameters)

    def _get_image(self):
        # Resolve the requested image (by ID or repository:tag), pulling it
        # when missing or when pull was requested; returns None if no image
        # parameter was given.
        if not self.parameters.image:
            self.log('No image specified')
            return None
        if is_image_name_id(self.parameters.image):
            image = self.client.find_image_by_id(self.parameters.image)
        else:
            repository, tag = utils.parse_repository_tag(self.parameters.image)
            if not tag:
                tag = "latest"
            image = self.client.find_image(repository, tag)
            if not self.check_mode:
                if not image or self.parameters.pull:
                    self.log("Pull the image.")
                    image, alreadyToLatest = self.client.pull_image(repository, tag)
                    if alreadyToLatest:
                        self.results['changed'] = False
                    else:
                        self.results['changed'] = True
                        self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
        self.log("image")
        self.log(image, pretty_print=True)
        return image

    def _image_is_different(self, image, container):
        # True when the resolved image's ID differs from the one the
        # container was created from; records the difference for diff output.
        if image and image.get('Id'):
            if container and container.Image:
                if image.get('Id') != container.Image:
                    self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image)
                    return True
        return False

    def update_limits(self, container):
        # Apply changed resource limits via update_container(); returns a
        # freshly inspected container when something changed.
        limits_differ, different_limits = container.has_different_resource_limits()
        if limits_differ:
            self.log("limit differences:")
            self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True)
            self.diff_tracker.merge(different_limits)
        if limits_differ and not self.check_mode:
            self.container_update(container.Id, self.parameters.update_parameters)
            return self._get_container(container.Id)
        return container

    def update_networks(self, container):
        # Connect the container to requested networks with the requested
        # options, and optionally (purge_networks) disconnect it from others.
        has_network_differences, network_differences = container.has_network_differences()
        updated_container = container
        if has_network_differences:
            if self.diff.get('differences'):
                self.diff['differences'].append(dict(network_differences=network_differences))
            else:
                self.diff['differences'] = [dict(network_differences=network_differences)]
            for netdiff in network_differences:
                self.diff_tracker.add(
                    'network.{0}'.format(netdiff['parameter']['name']),
                    parameter=netdiff['parameter'],
                    active=netdiff['container']
                )
            self.results['changed'] = True
            updated_container = self._add_networks(container, network_differences)

        if self.parameters.purge_networks:
            has_extra_networks, extra_networks = container.has_extra_networks()
            if has_extra_networks:
                if self.diff.get('differences'):
                    self.diff['differences'].append(dict(purge_networks=extra_networks))
                else:
                    self.diff['differences'] = [dict(purge_networks=extra_networks)]
                for extra_network in extra_networks:
                    self.diff_tracker.add(
                        'network.{0}'.format(extra_network['name']),
                        active=extra_network
                    )
                self.results['changed'] = True
                updated_container = self._purge_networks(container, extra_networks)
        return updated_container

    def _add_networks(self, container, differences):
        # Reconnect the container to each differing network: disconnect
        # first if currently connected, then connect with requested options.
        for diff in differences:
            # remove the container from the network, if connected
            if diff.get('container'):
                self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
                if not self.check_mode:
                    try:
                        self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
                    except Exception as exc:
                        self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
                                                                                          str(exc)))
            # connect to the network
            params = dict()
            for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
                if diff['parameter'].get(para):
                    params[para] = diff['parameter'][para]
            self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
            if not self.check_mode:
                try:
                    self.log("Connecting container to network %s" % diff['parameter']['id'])
                    self.log(params, pretty_print=True)
                    self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
                except Exception as exc:
                    self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], str(exc)))
        return self._get_container(container.Id)

    def _purge_networks(self, container, networks):
        # Disconnect the container from every non-requested network.
        for network in networks:
            self.results['actions'].append(dict(removed_from_network=network['name']))
            if not self.check_mode:
                try:
                    self.client.disconnect_container_from_network(container.Id, network['name'])
                except Exception as exc:
                    self.fail("Error disconnecting container from network %s - %s" % (network['name'],
                                                                                      str(exc)))
        return self._get_container(container.Id)

    def container_create(self, image, create_parameters):
        # Create a new container (no-op in check mode); returns a container
        # object for the new container, or None in check mode.
        self.log("create container")
        self.log("image: %s parameters:" % image)
        self.log(create_parameters, pretty_print=True)
        self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
        self.results['changed'] = True
        new_container = None
        if not self.check_mode:
            try:
                new_container = self.client.create_container(image, **create_parameters)
            except Exception as exc:
                self.fail("Error creating container: %s" % str(exc))
            return self._get_container(new_container['Id'])
        return new_container

    def container_start(self, container_id):
        # Start the container; when detach is False, wait for it to finish,
        # collect its output/exit status, and optionally clean it up.
        self.log("start container %s" % (container_id))
        self.results['actions'].append(dict(started=container_id))
        self.results['changed'] = True
        if not self.check_mode:
            try:
                self.client.start(container=container_id)
            except Exception as exc:
                self.fail("Error starting container %s: %s" % (container_id, str(exc)))

            if not self.parameters.detach:
                # docker-py >= 3.0 returns a dict from wait(), older versions an int.
                if self.client.docker_py_version >= LooseVersion('3.0'):
                    status = self.client.wait(container_id)['StatusCode']
                else:
                    status = self.client.wait(container_id)
                if self.parameters.auto_remove:
                    output = "Cannot retrieve result as auto_remove is enabled"
                    if self.parameters.output_logs:
                        self.client.module.warn('Cannot output_logs if auto_remove is enabled!')
                else:
                    config = self.client.inspect_container(container_id)
                    logging_driver = config['HostConfig']['LogConfig']['Type']

                    # Logs can only be fetched for these two logging drivers.
                    if logging_driver == 'json-file' or logging_driver == 'journald':
                        output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
                        if self.parameters.output_logs:
                            self._output_logs(msg=output)
                    else:
                        output = "Result logged using `%s` driver" % logging_driver

                if status != 0:
                    self.fail(output, status=status)
                if self.parameters.cleanup:
                    self.container_remove(container_id, force=True)
                insp = self._get_container(container_id)
                if insp.raw:
                    insp.raw['Output'] = output
                else:
                    insp.raw = dict(Output=output)
                return insp
        return self._get_container(container_id)

    def container_remove(self, container_id, link=False, force=False):
        # Remove the container, retrying after unpausing it (up to three
        # times) when docker refuses to remove a paused container.
        volume_state = (not self.parameters.keep_volumes)
        self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
        self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
        self.results['changed'] = True
        response = None
        if not self.check_mode:
            count = 0
            while True:
                try:
                    response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
                except NotFound as dummy:
                    # Container already gone - nothing to do.
                    pass
                except APIError as exc:
                    if 'Unpause the container before stopping or killing' in exc.explanation:
                        # New docker versions do not allow containers to be removed if they are paused
                        # Make sure we don't end up in an infinite loop
                        if count == 3:
                            self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, str(exc)))
                        count += 1
                        # Unpause
                        try:
                            self.client.unpause(container=container_id)
                        except Exception as exc2:
                            self.fail("Error unpausing container %s for removal: %s" % (container_id, str(exc2)))
                        # Now try again
                        continue
                    if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation:
                        # Removal already running (e.g. auto_remove) - treat as success.
                        pass
                    else:
                        self.fail("Error removing container %s: %s" % (container_id, str(exc)))
                except Exception as exc:
                    self.fail("Error removing container %s: %s" % (container_id, str(exc)))
                # We only loop when explicitly requested by 'continue'
                break
        return response

    def container_update(self, container_id, update_parameters):
        # Apply resource-limit changes via the docker update API.
        if update_parameters:
            self.log("update container %s" % (container_id))
            self.log(update_parameters, pretty_print=True)
            self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
            self.results['changed'] = True
            if not self.check_mode and callable(getattr(self.client, 'update_container')):
                try:
                    self.client.update_container(container_id, **update_parameters)
                except Exception as exc:
                    self.fail("Error updating container %s: %s" % (container_id, str(exc)))
        return self._get_container(container_id)

    def container_kill(self, container_id):
        # Kill the container, optionally with a custom signal.
        self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
        self.results['changed'] = True
        response = None
        if not self.check_mode:
            try:
                if self.parameters.kill_signal:
                    response = self.client.kill(container_id, signal=self.parameters.kill_signal)
                else:
                    response = self.client.kill(container_id)
            except Exception as exc:
                self.fail("Error killing container %s: %s" % (container_id, exc))
        return response

    def container_stop(self, container_id):
        # Stop the container (or kill it when force_kill is set), retrying
        # after unpausing when docker refuses to stop a paused container.
        if self.parameters.force_kill:
            self.container_kill(container_id)
            return
        self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
        self.results['changed'] = True
        response = None
        if not self.check_mode:
            count = 0
            while True:
                try:
                    if self.parameters.stop_timeout:
                        response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
                    else:
                        response = self.client.stop(container_id)
                except APIError as exc:
                    if 'Unpause the container before stopping or killing' in exc.explanation:
                        # New docker versions do not allow containers to be removed if they are paused
                        # Make sure we don't end up in an infinite loop
                        if count == 3:
                            self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, str(exc)))
                        count += 1
                        # Unpause
                        try:
                            self.client.unpause(container=container_id)
                        except Exception as exc2:
                            self.fail("Error unpausing container %s for removal: %s" % (container_id, str(exc2)))
                        # Now try again
                        continue
                    self.fail("Error stopping container %s: %s" % (container_id, str(exc)))
                except Exception as exc:
                    self.fail("Error stopping container %s: %s" % (container_id, str(exc)))
                # We only loop when explicitly requested by 'continue'
                break
        return response
def detect_ipvX_address_usage(client):
    '''
    Helper function to detect whether any specified network uses ipv4_address or ipv6_address
    '''
    networks = client.module.params.get("networks") or []
    return any(
        net.get('ipv4_address') is not None or net.get('ipv6_address') is not None
        for net in networks
    )
class AnsibleDockerClientContainer(AnsibleDockerClient):
    """Docker client specialized for the container module.

    Extends AnsibleDockerClient with comparison parsing and option
    minimal-version handling (see _parse_comparisons and
    _get_additional_minimal_versions below).
    """

    # A list of module options which are not docker container properties
    __NON_CONTAINER_PROPERTY_OPTIONS = (
        'docker_host', 'tls_hostname', 'api_version', 'timeout', 'cacert_path', 'cert_path',
        'key_path', 'ssl_version', 'tls', 'tls_verify', 'debug', 'env_file', 'force_kill',
        'keep_volumes', 'ignore_image', 'name', 'pull', 'purge_networks', 'recreate',
        'restart', 'state', 'trust_image_content', 'networks', 'cleanup', 'kill_signal',
        'output_logs', 'paused'
    )
def _parse_comparisons(self):
    """Build self.comparisons from option types and the user's `comparisons` parameter.

    Each entry maps an option name to dict(type=..., comparison=..., name=...),
    where comparison is one of 'strict', 'ignore' or 'allow_more_present'.
    Fails the module on unknown options or invalid comparison modes.
    """
    comparisons = {}
    comp_aliases = {}
    # Put in defaults
    explicit_types = dict(
        command='list',
        devices='set(dict)',
        dns_search_domains='list',
        dns_servers='list',
        env='set',
        entrypoint='list',
        etc_hosts='set',
        ulimits='set(dict)',
        device_read_bps='set(dict)',
        device_write_bps='set(dict)',
        device_read_iops='set(dict)',
        device_write_iops='set(dict)',
    )
    all_options = set()  # this is for improving user feedback when a wrong option was specified for comparison
    default_values = dict(
        stop_timeout='ignore',
    )
    for option, data in self.module.argument_spec.items():
        all_options.add(option)
        for alias in data.get('aliases', []):
            all_options.add(alias)
        # Ignore options which aren't used as container properties
        if option in self.__NON_CONTAINER_PROPERTY_OPTIONS:
            continue
        # Determine option type (renamed from `type` to avoid shadowing the builtin)
        if option in explicit_types:
            option_type = explicit_types[option]
        elif data['type'] == 'list':
            option_type = 'set'
        elif data['type'] == 'dict':
            option_type = 'dict'
        else:
            option_type = 'value'
        # Determine comparison type
        if option in default_values:
            comparison = default_values[option]
        elif option_type in ('list', 'value'):
            comparison = 'strict'
        else:
            comparison = 'allow_more_present'
        comparisons[option] = dict(type=option_type, comparison=comparison, name=option)
        # Keep track of aliases
        comp_aliases[option] = option
        for alias in data.get('aliases', []):
            comp_aliases[alias] = option
    # Process legacy ignore options
    if self.module.params['ignore_image']:
        comparisons['image']['comparison'] = 'ignore'
    # Process options
    if self.module.params.get('comparisons'):
        # If '*' appears in comparisons, process it first
        if '*' in self.module.params['comparisons']:
            value = self.module.params['comparisons']['*']
            if value not in ('strict', 'ignore'):
                self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!")
            for dummy, v in comparisons.items():
                v['comparison'] = value
        # Now process all other comparisons.
        comp_aliases_used = {}
        for key, value in self.module.params['comparisons'].items():
            if key == '*':
                continue
            # Find main key
            key_main = comp_aliases.get(key)
            if key_main is None:
                # BUGFIX: compare the user-supplied key against all known
                # options (the original tested key_main, which is always None
                # here, making the more helpful message unreachable).
                if key in all_options:
                    self.fail(("The module option '%s' cannot be specified in the comparisons dict," +
                               " since it does not correspond to container's state!") % key)
                self.fail("Unknown module option '%s' in comparisons dict!" % key)
            if key_main in comp_aliases_used:
                self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
            comp_aliases_used[key_main] = key
            # Check value and update accordingly
            if value in ('strict', 'ignore'):
                comparisons[key_main]['comparison'] = value
            elif value == 'allow_more_present':
                if comparisons[key_main]['type'] == 'value':
                    self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
                comparisons[key_main]['comparison'] = value
            else:
                self.fail("Unknown comparison mode '%s'!" % value)
    # Add implicit options
    comparisons['publish_all_ports'] = dict(type='value', comparison='strict', name='published_ports')
    comparisons['expected_ports'] = dict(type='dict', comparison=comparisons['published_ports']['comparison'], name='expected_ports')
    comparisons['disable_healthcheck'] = dict(type='value',
                                              comparison='ignore' if comparisons['healthcheck']['comparison'] == 'ignore' else 'strict',
                                              name='disable_healthcheck')
    # Check legacy values
    if self.module.params['ignore_image'] and comparisons['image']['comparison'] != 'ignore':
        self.module.warn('The ignore_image option has been overridden by the comparisons option!')
    self.comparisons = comparisons
def _get_additional_minimal_versions(self):
    """Compute support for options needing custom minimal-version logic (stop_timeout)."""
    # stop_timeout updates require API >= 1.25 ...
    stop_timeout_supported = self.docker_api_version >= LooseVersion('1.25')
    # ... and it only matters for updates when the option is set and we are not removing.
    stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent'
    if stop_timeout_supported:
        # API is new enough; additionally require docker-py >= 2.1 to send the value.
        stop_timeout_supported = self.docker_py_version >= LooseVersion('2.1')
        if stop_timeout_needed_for_update and not stop_timeout_supported:
            # We warn (instead of fail) since in older versions, stop_timeout was not used
            # to update the container's configuration, but only when stopping a container.
            self.module.warn("docker or docker-py version is %s. Minimum version required is 2.1 to update "
                             "the container's stop_timeout configuration. "
                             "If you use the 'docker-py' module, you have to switch to the docker 'Python' package." % (docker_version,))
    else:
        if stop_timeout_needed_for_update and not stop_timeout_supported:
            # We warn (instead of fail) since in older versions, stop_timeout was not used
            # to update the container's configuration, but only when stopping a container.
            self.module.warn("docker API version is %s. Minimum version required is 1.25 to set or "
                             "update the container's stop_timeout configuration." % (self.docker_api_version_str,))
    self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported
    def __init__(self, **kwargs):
        """Initialize the container client.

        Declares, per module option, the minimal docker-py and Docker API
        versions required for that option, delegates version checking to
        the parent class, then derives the additional stop_timeout
        requirement and parses the ``comparisons`` option.
        """
        # Map of option name -> minimal docker_py_version / docker_api_version
        # (empty dict means "always available"; checking is done by the
        # superclass __init__).
        option_minimal_versions = dict(
            # internal options
            log_config=dict(),
            publish_all_ports=dict(),
            ports=dict(),
            volume_binds=dict(),
            name=dict(),
            # normal options
            device_read_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
            device_read_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
            device_write_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
            device_write_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
            dns_opts=dict(docker_api_version='1.21', docker_py_version='1.10.0'),
            ipc_mode=dict(docker_api_version='1.25'),
            mac_address=dict(docker_api_version='1.25'),
            oom_killer=dict(docker_py_version='2.0.0'),
            oom_score_adj=dict(docker_api_version='1.22', docker_py_version='2.0.0'),
            shm_size=dict(docker_api_version='1.22'),
            stop_signal=dict(docker_api_version='1.21'),
            tmpfs=dict(docker_api_version='1.22'),
            volume_driver=dict(docker_api_version='1.21'),
            memory_reservation=dict(docker_api_version='1.21'),
            kernel_memory=dict(docker_api_version='1.21'),
            auto_remove=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
            healthcheck=dict(docker_py_version='2.0.0', docker_api_version='1.24'),
            init=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
            runtime=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
            sysctls=dict(docker_py_version='1.10.0', docker_api_version='1.24'),
            userns_mode=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
            uts=dict(docker_py_version='3.5.0', docker_api_version='1.25'),
            pids_limit=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
            # specials
            ipvX_address_supported=dict(docker_py_version='1.9.0', detect_usage=detect_ipvX_address_usage,
                                        usage_msg='ipv4_address or ipv6_address in networks'),
            stop_timeout=dict(),  # see _get_additional_minimal_versions()
        )
        super(AnsibleDockerClientContainer, self).__init__(
            option_minimal_versions=option_minimal_versions,
            option_minimal_versions_ignore_params=self.__NON_CONTAINER_PROPERTY_OPTIONS,
            **kwargs
        )
        # stop_timeout support depends on two version checks at once, so it
        # is filled in after the generic checking above.
        self._get_additional_minimal_versions()
        self._parse_comparisons()
def main():
    """Entry point of the docker_container module.

    Builds the Ansible argument spec, constructs the Docker client (which
    validates option/version compatibility and parses the ``comparisons``
    option), runs the container manager, and exits with its sanitized
    results.
    """
    argument_spec = dict(
        auto_remove=dict(type='bool', default=False),
        blkio_weight=dict(type='int'),
        capabilities=dict(type='list', elements='str'),
        cap_drop=dict(type='list', elements='str'),
        cleanup=dict(type='bool', default=False),
        command=dict(type='raw'),
        comparisons=dict(type='dict'),
        cpu_period=dict(type='int'),
        cpu_quota=dict(type='int'),
        cpuset_cpus=dict(type='str'),
        cpuset_mems=dict(type='str'),
        cpu_shares=dict(type='int'),
        detach=dict(type='bool', default=True),
        devices=dict(type='list', elements='str'),
        device_read_bps=dict(type='list', elements='dict', options=dict(
            path=dict(required=True, type='str'),
            rate=dict(required=True, type='str'),
        )),
        device_write_bps=dict(type='list', elements='dict', options=dict(
            path=dict(required=True, type='str'),
            rate=dict(required=True, type='str'),
        )),
        device_read_iops=dict(type='list', elements='dict', options=dict(
            path=dict(required=True, type='str'),
            rate=dict(required=True, type='int'),
        )),
        device_write_iops=dict(type='list', elements='dict', options=dict(
            path=dict(required=True, type='str'),
            rate=dict(required=True, type='int'),
        )),
        dns_servers=dict(type='list', elements='str'),
        dns_opts=dict(type='list', elements='str'),
        dns_search_domains=dict(type='list', elements='str'),
        domainname=dict(type='str'),
        entrypoint=dict(type='list', elements='str'),
        env=dict(type='dict'),
        env_file=dict(type='path'),
        etc_hosts=dict(type='dict'),
        exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']),
        force_kill=dict(type='bool', default=False, aliases=['forcekill']),
        groups=dict(type='list', elements='str'),
        healthcheck=dict(type='dict', options=dict(
            test=dict(type='raw'),
            interval=dict(type='str'),
            timeout=dict(type='str'),
            start_period=dict(type='str'),
            retries=dict(type='int'),
        )),
        hostname=dict(type='str'),
        ignore_image=dict(type='bool', default=False),
        image=dict(type='str'),
        init=dict(type='bool', default=False),
        interactive=dict(type='bool', default=False),
        ipc_mode=dict(type='str'),
        keep_volumes=dict(type='bool', default=True),
        kernel_memory=dict(type='str'),
        kill_signal=dict(type='str'),
        labels=dict(type='dict'),
        links=dict(type='list', elements='str'),
        log_driver=dict(type='str'),
        log_options=dict(type='dict', aliases=['log_opt']),
        mac_address=dict(type='str'),
        memory=dict(type='str', default='0'),
        memory_reservation=dict(type='str'),
        memory_swap=dict(type='str'),
        memory_swappiness=dict(type='int'),
        name=dict(type='str', required=True),
        network_mode=dict(type='str'),
        networks=dict(type='list', elements='dict', options=dict(
            name=dict(type='str', required=True),
            ipv4_address=dict(type='str'),
            ipv6_address=dict(type='str'),
            aliases=dict(type='list', elements='str'),
            links=dict(type='list', elements='str'),
        )),
        oom_killer=dict(type='bool'),
        oom_score_adj=dict(type='int'),
        output_logs=dict(type='bool', default=False),
        paused=dict(type='bool', default=False),
        pid_mode=dict(type='str'),
        pids_limit=dict(type='int'),
        privileged=dict(type='bool', default=False),
        published_ports=dict(type='list', elements='str', aliases=['ports']),
        pull=dict(type='bool', default=False),
        purge_networks=dict(type='bool', default=False),
        read_only=dict(type='bool', default=False),
        recreate=dict(type='bool', default=False),
        restart=dict(type='bool', default=False),
        restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
        restart_retries=dict(type='int'),
        runtime=dict(type='str'),
        security_opts=dict(type='list', elements='str'),
        shm_size=dict(type='str'),
        state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']),
        stop_signal=dict(type='str'),
        stop_timeout=dict(type='int'),
        sysctls=dict(type='dict'),
        tmpfs=dict(type='list', elements='str'),
        trust_image_content=dict(type='bool', default=False),
        tty=dict(type='bool', default=False),
        ulimits=dict(type='list', elements='str'),
        user=dict(type='str'),
        userns_mode=dict(type='str'),
        uts=dict(type='str'),
        volume_driver=dict(type='str'),
        volumes=dict(type='list', elements='str'),
        volumes_from=dict(type='list', elements='str'),
        working_dir=dict(type='str'),
    )
    # An image is mandatory whenever the container may have to be created.
    required_if = [
        ('state', 'present', ['image'])
    ]
    client = AnsibleDockerClientContainer(
        argument_spec=argument_spec,
        required_if=required_if,
        supports_check_mode=True,
        min_docker_api_version='1.20',
    )
    cm = ContainerManager(client)
    # exit_json never returns; results are sanitized for JSON serialization.
    client.module.exit_json(**sanitize_result(cm.results))
# Ansible executes modules as standalone scripts, so run main() directly.
if __name__ == '__main__':
    main()
| EvanK/ansible | lib/ansible/modules/cloud/docker/docker_container.py | Python | gpl-3.0 | 120,043 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WSGI tools for use with swift."""
from __future__ import print_function
import errno
import inspect
import os
import signal
import time
import mimetools
from swift import gettext_ as _
from textwrap import dedent
import eventlet
import eventlet.debug
from eventlet import greenio, GreenPool, sleep, wsgi, listen, Timeout
from paste.deploy import loadwsgi
from eventlet.green import socket, ssl, os as green_os
from six import BytesIO
from six import StringIO
from six.moves.urllib.parse import unquote
from swift.common import utils, constraints
from swift.common.storage_policy import BindPortsCache
from swift.common.swob import Request
from swift.common.utils import capture_stdio, disable_fallocate, \
drop_privileges, get_logger, NullLogger, config_true_value, \
validate_configuration, get_hub, config_auto_int_value, \
CloseableChain
# Set maximum line size of message headers to be accepted.
wsgi.MAX_HEADER_LINE = constraints.MAX_HEADER_SIZE
# Detect the number of CPUs for the default worker count; fall back to 1
# when multiprocessing is unavailable or cannot determine the count.
CPU_COUNT = 1
try:
    import multiprocessing
    detected = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
    pass
else:
    if detected:
        CPU_COUNT = detected
class NamedConfigLoader(loadwsgi.ConfigLoader):
    """
    ConfigLoader subclass that records, on every context it produces, the
    name of the config section the context was loaded from (available
    afterwards as ``context.name``).
    """

    def get_context(self, object_type, name=None, global_conf=None):
        ctx = super(NamedConfigLoader, self).get_context(
            object_type, name=name, global_conf=global_conf)
        ctx.name = name
        return ctx
# Globally replace paste.deploy's loader class so every context created
# from here on carries its originating section name.
loadwsgi.ConfigLoader = NamedConfigLoader
class ConfigDirLoader(NamedConfigLoader):
    """
    Loader that assembles configuration from all config files found under
    a single directory path.
    """

    def __init__(self, conf_dir):
        # parent class uses filename attribute when building error messages
        conf_dir = conf_dir.strip()
        self.filename = conf_dir
        defaults = {
            'here': os.path.normpath(os.path.abspath(conf_dir)),
            '__file__': os.path.abspath(conf_dir),
        }
        self.parser = loadwsgi.NicerConfigParser(conf_dir, defaults=defaults)
        self.parser.optionxform = str  # Don't lower-case keys
        utils.read_conf_dir(self.parser, conf_dir)
def _loadconfigdir(object_type, uri, path, name, relative_to, global_conf):
    # paste.deploy loader hook for "config_dir:" URIs.
    target = (os.path.normpath(os.path.join(relative_to, path))
              if relative_to else path)
    dir_loader = ConfigDirLoader(target)
    if global_conf:
        dir_loader.update_defaults(global_conf, overwrite=False)
    return dir_loader.get_context(object_type, name, global_conf)
# add config_dir parsing to paste.deploy
# (makes URIs like "config_dir:/etc/swift/proxy-server.conf.d" loadable)
loadwsgi._loaders['config_dir'] = _loadconfigdir
class ConfigString(NamedConfigLoader):
    """
    Adapter that presents a raw config string as a paste.deploy loader.

    Pass an instance to our loadcontext (e.g. via appconfig) and it will
    be intercepted and routed to the right loader instead of being parsed
    as a URI.
    """

    def __init__(self, config_string):
        self.contents = StringIO(dedent(config_string))
        self.filename = "string"
        defaults = {
            '__file__': "string",
            'here': "string",
        }
        self.parser = loadwsgi.NicerConfigParser("string", defaults=defaults)
        self.parser.optionxform = str  # Don't lower-case keys
        self.parser.readfp(self.contents)
def wrap_conf_type(f):
    """
    Wrap a function whose first argument is a paste.deploy style config
    uri, so that callers may pass an un-adorned raw filesystem path (or
    config string) instead.  The appropriate directive is prepended
    automatically: ``config_dir:`` when the path is a directory on the
    file system, otherwise ``config:``.
    """
    def wrapper(conf_path, *args, **kwargs):
        scheme = 'config_dir' if os.path.isdir(conf_path) else 'config'
        return f('%s:%s' % (scheme, conf_path), *args, **kwargs)
    return wrapper
# appconfig() that also accepts bare file/directory paths, not just URIs.
appconfig = wrap_conf_type(loadwsgi.appconfig)
def monkey_patch_mimetools():
    """
    mimetools.Message defaults content-type to "text/plain".

    Patch parsetype so a missing Content-Type header leaves the type
    attributes as None, allowing callers to detect absent headers.
    Idempotent: applies the patch only once.
    """
    orig_parsetype = mimetools.Message.parsetype

    def parsetype(self):
        if self.typeheader:
            orig_parsetype(self)
        else:
            # No Content-Type header present: leave everything unset.
            self.type = None
            self.maintype = None
            self.subtype = None
            self.plisttext = ''

    parsetype.patched = True

    if not getattr(mimetools.Message.parsetype, 'patched', None):
        mimetools.Message.parsetype = parsetype
def get_socket(conf):
    """Bind socket to bind ip:port in conf

    :param conf: Configuration dict to read settings from
    :returns: a socket object as returned from socket.listen or
              ssl.wrap_socket if conf specifies cert_file
    :raises ConfigFilePortError: if bind_port is missing or not an integer
    """
    try:
        bind_port = int(conf['bind_port'])
    except (ValueError, KeyError, TypeError):
        # bind_port must be explicitly configured; no default is assumed.
        raise ConfigFilePortError()
    bind_addr = (conf.get('bind_ip', '0.0.0.0'), bind_port)
    # Pick the first resolved IPv4/IPv6 family for the configured address.
    address_family = [addr[0] for addr in socket.getaddrinfo(
        bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
        if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
    sock = None
    bind_timeout = int(conf.get('bind_timeout', 30))
    retry_until = time.time() + bind_timeout
    warn_ssl = False
    # Retry for up to bind_timeout seconds; only EADDRINUSE is retried
    # (e.g. a previous process still holds the port), anything else raises.
    while not sock and time.time() < retry_until:
        try:
            sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)),
                          family=address_family)
            if 'cert_file' in conf:
                warn_ssl = True
                sock = ssl.wrap_socket(sock, certfile=conf['cert_file'],
                                       keyfile=conf['key_file'])
        except socket.error as err:
            if err.args[0] != errno.EADDRINUSE:
                raise
            sleep(0.1)
    if not sock:
        raise Exception(_('Could not bind to %s:%s '
                          'after trying for %s seconds') % (
                              bind_addr[0], bind_addr[1], bind_timeout))
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # in my experience, sockets can hang around forever without keepalive
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    if hasattr(socket, 'TCP_KEEPIDLE'):
        # Platform-dependent: start keepalive probes after 10 idle minutes.
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)
    if warn_ssl:
        ssl_warning_message = _('WARNING: SSL should only be enabled for '
                                'testing purposes. Use external SSL '
                                'termination for a production deployment.')
        get_logger(conf).warning(ssl_warning_message)
        print(ssl_warning_message)
    return sock
class RestrictedGreenPool(GreenPool):
    """
    GreenPool variant for single-worker operation: when constructed with
    size == 1, spawn_n() blocks in waitall() so the caller cannot go on
    to do other work (such as calling accept()) while the single
    greenthread runs.
    """

    def __init__(self, size=1024):
        super(RestrictedGreenPool, self).__init__(size=size)
        self._rgp_do_wait = (size == 1)

    def spawn_n(self, *args, **kwargs):
        super(RestrictedGreenPool, self).spawn_n(*args, **kwargs)
        if self._rgp_do_wait:
            self.waitall()
def pipeline_property(name, **kwargs):
    """
    Build a property that searches down a WSGI pipeline for an attribute.

    Accessing the property walks the chain of ``.app`` attributes starting
    from the instance it was accessed on and returns the first attribute
    of the given name found along the way.  The value found is memoized on
    the instance under the attribute name prefixed with an underscore.

    If no app in the pipeline has the attribute, the ``default`` keyword
    argument is returned when supplied; otherwise AttributeError is
    raised.
    """
    cache_attr_name = '_%s' % name

    def getter(self):
        memoized = getattr(self, cache_attr_name, None)
        if memoized:
            return memoized
        node = getattr(self, 'app', None)  # first app hangs off self
        while node:
            try:
                found = getattr(node, name)
            except AttributeError:
                node = getattr(node, 'app', None)
                continue
            setattr(self, cache_attr_name, found)
            return found
        if 'default' in kwargs:
            return kwargs['default']
        raise AttributeError('No apps in pipeline have a '
                             '%s attribute' % name)

    return property(getter)
class PipelineWrapper(object):
    """
    Convenience wrapper exposing utility methods for inspecting and
    modifying the composition of a wsgi pipeline context.
    """

    def __init__(self, context):
        self.context = context

    def __contains__(self, entry_point_name):
        try:
            self.index(entry_point_name)
        except ValueError:
            return False
        return True

    def startswith(self, entry_point_name):
        """
        Tests if the pipeline starts with the given entry point name.

        :param entry_point_name: entry point of middleware or app (Swift only)
        :returns: True if entry_point_name is first in pipeline, False
                  otherwise
        """
        if self.context.filter_contexts:
            head = self.context.filter_contexts[0]
        else:
            # no filters configured; the app itself is the start
            head = self.context.app_context
        return head.entry_point_name == entry_point_name

    def _format_for_display(self, ctx):
        # Contexts specified by pipeline= have .name set in NamedConfigLoader.
        # A context without .name is foreign; show a placeholder, don't crash.
        return getattr(ctx, 'name', "<unknown>")

    def __str__(self):
        contexts = list(self.context.filter_contexts)
        contexts.append(self.context.app_context)
        return " ".join(self._format_for_display(c) for c in contexts)

    def create_filter(self, entry_point_name):
        """
        Creates a context for a filter that can subsequently be added
        to a pipeline context.

        :param entry_point_name: entry point of the middleware (Swift only)
        :returns: a filter context
        """
        ctx = loadwsgi.loadcontext(loadwsgi.FILTER,
                                   'egg:swift#' + entry_point_name,
                                   global_conf=self.context.global_conf)
        ctx.protocol = 'paste.filter_factory'
        ctx.name = entry_point_name
        return ctx

    def index(self, entry_point_name):
        """
        Returns the first index of the given entry point name in the
        pipeline.  Raises ValueError if the given module is not in the
        pipeline.
        """
        for pos, ctx in enumerate(self.context.filter_contexts):
            if ctx.entry_point_name == entry_point_name:
                return pos
        raise ValueError("%s is not in pipeline" % (entry_point_name,))

    def insert_filter(self, ctx, index=0):
        """
        Inserts a filter module into the pipeline context.

        :param ctx: the context to be inserted
        :param index: (optional) index at which filter should be
                      inserted in the list of pipeline filters. Default
                      is 0, which means the start of the pipeline.
        """
        self.context.filter_contexts.insert(index, ctx)
def loadcontext(object_type, uri, name=None, relative_to=None,
                global_conf=None):
    """
    Load a paste.deploy context, accepting ConfigLoader instances (e.g.
    ConfigString) in addition to plain paths/URIs.
    """
    if isinstance(uri, loadwsgi.ConfigLoader):
        # A pre-built loader: bypass uri parsing / loader routing and use
        # it directly.
        if global_conf:
            uri.update_defaults(global_conf, overwrite=False)
        return uri.get_context(object_type, name, global_conf)
    to_uri = wrap_conf_type(lambda x: x)
    return loadwsgi.loadcontext(object_type, to_uri(uri), name=name,
                                relative_to=relative_to,
                                global_conf=global_conf)
def _add_pipeline_properties(app, *names):
    # Install a pipeline-searching property on the app's class for every
    # requested name the app does not already provide.
    for attr in names:
        if not hasattr(app, attr):
            setattr(app.__class__, attr, pipeline_property(attr))
def loadapp(conf_file, global_conf=None, allow_modify_pipeline=True):
    """
    Loads a context from a config file, and if the context is a pipeline
    then presents the app with the opportunity to modify the pipeline.
    """
    ctx = loadcontext(loadwsgi.APP, conf_file,
                      global_conf=global_conf or {})
    if ctx.object_type.name == 'pipeline':
        # give app the opportunity to modify the pipeline context
        app = ctx.app_context.create()
        modifier = getattr(app, 'modify_wsgi_pipeline', None)
        if allow_modify_pipeline and modifier:
            modifier(PipelineWrapper(ctx))
    return ctx.create()
def run_server(conf, logger, sock, global_conf=None):
    """
    Configure eventlet/wsgi globals and serve the WSGI app loaded from
    ``conf['__file__']`` on ``sock`` until the server exits.

    :param conf: server configuration dict (must contain '__file__')
    :param logger: logger (or LogAdapter) used for error reporting
    :param sock: bound listen socket to serve on
    :param global_conf: optional global config dict passed to loadapp;
                        built from the logger's name when omitted
    """
    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to the timezone in which
    # the server first starts running in locations that periodically change
    # timezones.
    os.environ['TZ'] = time.strftime("%z", time.gmtime())
    wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
    # Turn off logging requests by the underlying WSGI software.
    wsgi.HttpProtocol.log_request = lambda *a: None
    # Redirect logging other messages by the underlying WSGI software.
    wsgi.HttpProtocol.log_message = \
        lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
    wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)
    eventlet.hubs.use_hub(get_hub())
    # NOTE(sileht):
    #     monkey-patching thread is required by python-keystoneclient;
    #     monkey-patching select is required by oslo.messaging pika driver
    #     if thread is monkey-patched.
    eventlet.patcher.monkey_patch(all=False, socket=True, select=True,
                                  thread=True)
    eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)
    wsgi_logger = NullLogger()
    if eventlet_debug:
        # let eventlet.wsgi.server log to stderr
        wsgi_logger = None
    # utils.LogAdapter stashes name in server; fallback on unadapted loggers
    if not global_conf:
        if hasattr(logger, 'server'):
            log_name = logger.server
        else:
            log_name = logger.name
        global_conf = {'log_name': log_name}
    app = loadapp(conf['__file__'], global_conf=global_conf)
    max_clients = int(conf.get('max_clients', '1024'))
    pool = RestrictedGreenPool(size=max_clients)
    try:
        # Disable capitalizing headers in Eventlet if possible. This is
        # necessary for the AWS SDK to work with swift3 middleware.
        argspec = inspect.getargspec(wsgi.server)
        if 'capitalize_response_headers' in argspec.args:
            wsgi.server(sock, app, wsgi_logger, custom_pool=pool,
                        capitalize_response_headers=False)
        else:
            wsgi.server(sock, app, wsgi_logger, custom_pool=pool)
    except socket.error as err:
        # NOTE(review): EINVAL is swallowed here — presumably it indicates
        # the listen socket was shut down externally (normal shutdown);
        # confirm against the callers before changing.
        if err[0] != errno.EINVAL:
            raise
    pool.waitall()
class WorkersStrategy(object):
    """
    WSGI server management strategy object for a single bind port and
    listen socket shared by a configured number of forked-off workers.

    Used in :py:func:`run_wsgi`.

    :param dict conf: Server configuration dictionary.
    :param logger: The server's :py:class:`~swift.common.utils.LogAdaptor`
                   object.
    """

    def __init__(self, conf, logger):
        self.conf = conf
        self.logger = logger
        self.sock = None
        self.children = []
        self.worker_count = config_auto_int_value(conf.get('workers'),
                                                  CPU_COUNT)

    def loop_timeout(self):
        """
        Return 0.5: short enough to avoid busy-waiting yet non-None so the
        main loop can periodically check whether it should keep running
        (e.g. after SIGHUP).
        """
        return 0.5

    def bind_ports(self):
        """
        Bind this strategy's single listen socket and drop privileges
        (the parent process never needs to bind again).

        :returns: an error message string on failure, else None
        """
        try:
            self.sock = get_socket(self.conf)
        except ConfigFilePortError:
            return ('bind_port wasn\'t properly set in the config file. '
                    'It must be explicitly set to a valid port number.')
        drop_privileges(self.conf.get('user', 'swift'))

    def no_fork_sock(self):
        """
        Return a server listen socket if the server should run in the
        foreground (no fork).
        """
        # Useful for profiling [no forks].
        return self.sock if self.worker_count == 0 else None

    def new_worker_socks(self):
        """
        Yield a (socket, opaque_data) tuple for each worker that still
        needs to be forked off.

        The opaque_data item (always None here) is later passed into the
        :py:meth:`log_sock_exit` and :py:meth:`register_worker_start`
        methods where it will be ignored.
        """
        while len(self.children) < self.worker_count:
            yield self.sock, None

    def post_fork_hook(self):
        """
        Perform any initialization in a forked-off child process prior to
        starting the wsgi server.
        """
        pass

    def log_sock_exit(self, sock, _unused):
        """
        Log a server's exit.

        :param socket sock: The listen socket for the worker just started.
        :param _unused: The socket's opaque_data yielded by
                        :py:meth:`new_worker_socks`.
        """
        self.logger.notice('Child %d exiting normally' % os.getpid())

    def register_worker_start(self, sock, _unused, pid):
        """
        Called when a new worker is started.

        :param socket sock: The listen socket for the worker just started.
        :param _unused: The socket's opaque_data yielded by
                        new_worker_socks().
        :param int pid: The new worker process' PID
        """
        self.logger.notice('Started child %s' % pid)
        self.children.append(pid)

    def register_worker_exit(self, pid):
        """
        Called when a worker has exited.

        :param int pid: The PID of the worker that exited.
        """
        self.logger.error('Removing dead child %s' % pid)
        self.children.remove(pid)

    def shutdown_sockets(self):
        """
        Shutdown any listen sockets.
        """
        greenio.shutdown_safe(self.sock)
        self.sock.close()
class PortPidState(object):
    """
    A helper class for :py:class:`ServersPerPortStrategy` to track listen
    sockets and PIDs for each port.

    :param int servers_per_port: The configured number of servers per port.
    :param logger: The server's :py:class:`~swift.common.utils.LogAdaptor`
    """

    def __init__(self, servers_per_port, logger):
        self.servers_per_port = servers_per_port
        self.logger = logger
        # port -> {'sock': <listen socket>,
        #          'pids': [pid-or-None] * servers_per_port}
        self.sock_data_by_port = {}

    def sock_for_port(self, port):
        """
        :param int port: The port whose socket is desired.
        :returns: The bound listen socket for the given port.
        """
        return self.sock_data_by_port[port]['sock']

    def port_for_sock(self, sock):
        """
        :param socket sock: A tracked bound listen socket
        :returns: The port the socket is bound to, or None if untracked.
        """
        for port, sock_data in self.sock_data_by_port.items():
            if sock_data['sock'] == sock:
                return port

    def _pid_to_port_and_index(self, pid):
        # Reverse lookup: which (port, server index) slot holds this PID?
        # Returns None when the PID is not tracked.
        for port, sock_data in self.sock_data_by_port.items():
            for server_idx, a_pid in enumerate(sock_data['pids']):
                if pid == a_pid:
                    return port, server_idx

    def port_index_pairs(self):
        """
        Returns current (port, server index) pairs.

        :returns: A set of (port, server_idx) tuples for currently-tracked
                  ports, sockets, and PIDs.
        """
        current_port_index_pairs = set()
        for port, pid_state in self.sock_data_by_port.items():
            current_port_index_pairs |= set(
                (port, i)
                for i, pid in enumerate(pid_state['pids'])
                if pid is not None)
        return current_port_index_pairs

    def track_port(self, port, sock):
        """
        Start tracking servers for the given port and listen socket.

        :param int port: The port to start tracking
        :param socket sock: The bound listen socket for the port.
        """
        self.sock_data_by_port[port] = {
            'sock': sock,
            'pids': [None] * self.servers_per_port,
        }

    def not_tracking(self, port):
        """
        Return True if the specified port is not being tracked.

        :param int port: A port to check.
        """
        return port not in self.sock_data_by_port

    def all_socks(self):
        """
        Yield all current listen sockets.
        """
        # Use .values() rather than the Python 2-only .itervalues() so this
        # works under both Python 2 and Python 3 (identical iteration
        # behavior; py2 merely materializes a small list).
        for orphan_data in self.sock_data_by_port.values():
            yield orphan_data['sock']

    def forget_port(self, port):
        """
        Idempotently forget a port, closing the listen socket at most once.
        """
        orphan_data = self.sock_data_by_port.pop(port, None)
        if orphan_data:
            greenio.shutdown_safe(orphan_data['sock'])
            orphan_data['sock'].close()
            self.logger.notice('Closing unnecessary sock for port %d', port)

    def add_pid(self, port, index, pid):
        # Record a freshly-forked worker PID in its (port, index) slot.
        self.sock_data_by_port[port]['pids'][index] = pid

    def forget_pid(self, pid):
        """
        Idempotently forget a PID.  It's okay if the PID is no longer in
        our data structure (it could have been removed by the "orphan
        port" removal in :py:meth:`new_worker_socks`).

        :param int pid: The PID which exited.
        """
        port_server_idx = self._pid_to_port_and_index(pid)
        if port_server_idx is None:
            # This method can lose a race with the "orphan port" removal, when
            # a ring reload no longer contains a port. So it's okay if we were
            # unable to find a (port, server_idx) pair.
            return
        dead_port, server_idx = port_server_idx
        self.logger.error('Removing dead child %d (PID: %s) for port %s',
                          server_idx, pid, dead_port)
        self.sock_data_by_port[dead_port]['pids'][server_idx] = None
class ServersPerPortStrategy(object):
    """
    WSGI server management strategy object for an object-server with one listen
    port per unique local port in the storage policy rings.  The
    `servers_per_port` integer config setting determines how many workers are
    run per port.

    Used in :py:func:`run_wsgi`.

    :param dict conf: Server configuration dictionary.
    :param logger: The server's :py:class:`~swift.common.utils.LogAdaptor`
                   object.
    :param int servers_per_port: The number of workers to run per port.
    """

    def __init__(self, conf, logger, servers_per_port):
        self.conf = conf
        self.logger = logger
        self.servers_per_port = servers_per_port
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        self.ring_check_interval = int(conf.get('ring_check_interval', 15))
        # Tracks (port -> socket, worker PIDs) bookkeeping for this node.
        self.port_pid_state = PortPidState(servers_per_port, logger)
        bind_ip = conf.get('bind_ip', '0.0.0.0')
        # Caches the set of ring ports local to bind_ip, re-read on reload.
        self.cache = BindPortsCache(self.swift_dir, bind_ip)

    def _reload_bind_ports(self):
        # Refresh self.bind_ports from the (possibly changed) ring files.
        self.bind_ports = self.cache.all_bind_ports_for_node()

    def _bind_port(self, port):
        # Bind a listen socket for one port using a per-port copy of conf.
        new_conf = self.conf.copy()
        new_conf['bind_port'] = port
        sock = get_socket(new_conf)
        self.port_pid_state.track_port(port, sock)

    def loop_timeout(self):
        """
        Return timeout before checking for reloaded rings.

        :returns: The time to wait for a child to exit before checking for
                  reloaded rings (new ports).
        """
        return self.ring_check_interval

    def bind_ports(self):
        """
        Bind one listen socket per unique local storage policy ring port.  Then
        do all the work of drop_privileges except the actual dropping of
        privileges (each forked-off worker will do that post-fork in
        :py:meth:`post_fork_hook`).
        """
        self._reload_bind_ports()
        for port in self.bind_ports:
            self._bind_port(port)
        # The workers strategy drops privileges here, which we obviously cannot
        # do if we want to support binding to low ports. But we do want some
        # of the actions that drop_privileges did.
        try:
            os.setsid()
        except OSError:
            pass
        # In case you need to rmdir where you started the daemon:
        os.chdir('/')
        # Ensure files are created with the correct privileges:
        os.umask(0o22)

    def no_fork_sock(self):
        """
        This strategy does not support running in the foreground.
        """
        pass

    def new_worker_socks(self):
        """
        Yield a sequence of (socket, server_idx) tuples for each server which
        should be forked-off and started.

        Any sockets for "orphaned" ports no longer in any ring will be closed
        (causing their associated workers to gracefully exit) after all new
        sockets have been yielded.

        The server_idx item for each socket will passed into the
        :py:meth:`log_sock_exit` and :py:meth:`register_worker_start` methods.
        """
        self._reload_bind_ports()
        desired_port_index_pairs = set(
            (p, i) for p in self.bind_ports
            for i in range(self.servers_per_port))
        current_port_index_pairs = self.port_pid_state.port_index_pairs()
        if desired_port_index_pairs != current_port_index_pairs:
            # Orphan ports are ports which had object-server processes running,
            # but which no longer appear in the ring. We'll kill them after we
            # start missing workers.
            orphan_port_index_pairs = current_port_index_pairs - \
                desired_port_index_pairs
            # Fork off worker(s) for every port who's supposed to have
            # worker(s) but doesn't
            missing_port_index_pairs = desired_port_index_pairs - \
                current_port_index_pairs
            for port, server_idx in sorted(missing_port_index_pairs):
                if self.port_pid_state.not_tracking(port):
                    try:
                        self._bind_port(port)
                    except Exception as e:
                        self.logger.critical('Unable to bind to port %d: %s',
                                             port, e)
                        continue
                yield self.port_pid_state.sock_for_port(port), server_idx
            for orphan_pair in orphan_port_index_pairs:
                # For any port in orphan_port_index_pairs, it is guaranteed
                # that there should be no listen socket for that port, so we
                # can close and forget them.
                self.port_pid_state.forget_port(orphan_pair[0])

    def post_fork_hook(self):
        """
        Called in each child process, prior to starting the actual wsgi server,
        to drop privileges.
        """
        drop_privileges(self.conf.get('user', 'swift'), call_setsid=False)

    def log_sock_exit(self, sock, server_idx):
        """
        Log a server's exit.
        """
        port = self.port_pid_state.port_for_sock(sock)
        self.logger.notice('Child %d (PID %d, port %d) exiting normally',
                           server_idx, os.getpid(), port)

    def register_worker_start(self, sock, server_idx, pid):
        """
        Called when a new worker is started.

        :param socket sock: The listen socket for the worker just started.
        :param server_idx: The socket's server_idx as yielded by
                           :py:meth:`new_worker_socks`.
        :param int pid: The new worker process' PID
        """
        port = self.port_pid_state.port_for_sock(sock)
        self.logger.notice('Started child %d (PID %d) for port %d',
                           server_idx, pid, port)
        self.port_pid_state.add_pid(port, server_idx, pid)

    def register_worker_exit(self, pid):
        """
        Called when a worker has exited.

        :param int pid: The PID of the worker that exited.
        """
        self.port_pid_state.forget_pid(pid)

    def shutdown_sockets(self):
        """
        Shutdown any listen sockets.
        """
        for sock in self.port_pid_state.all_socks():
            greenio.shutdown_safe(sock)
            sock.close()
def run_wsgi(conf_path, app_section, *args, **kwargs):
    """
    Runs the server according to some strategy. The default strategy runs a
    specified number of workers in pre-fork model. The object-server (only)
    may use a servers-per-port strategy if its config has a servers_per_port
    setting with a value greater than zero.
    :param conf_path: Path to paste.deploy style configuration file/directory
    :param app_section: App name from conf file to load config from
    :returns: 0 if successful, nonzero otherwise
    """
    # Load configuration, Set logger and Load request processor
    try:
        (conf, logger, log_name) = \
            _initrp(conf_path, app_section, *args, **kwargs)
    except ConfigFileError as e:
        print(e)
        return 1
    servers_per_port = int(conf.get('servers_per_port', '0') or 0)
    # NOTE: for now servers_per_port is object-server-only; future work could
    # be done to test and allow it to be used for account and container
    # servers, but that has not been done yet.
    if servers_per_port and app_section == 'object-server':
        strategy = ServersPerPortStrategy(
            conf, logger, servers_per_port=servers_per_port)
    else:
        strategy = WorkersStrategy(conf, logger)
    # Bind before loading the app so config errors surface early.
    error_msg = strategy.bind_ports()
    if error_msg:
        logger.error(error_msg)
        print(error_msg)
        return 1
    # Ensure the configuration and application can be loaded before proceeding.
    global_conf = {'log_name': log_name}
    if 'global_conf_callback' in kwargs:
        kwargs['global_conf_callback'](conf, global_conf)
    loadapp(conf_path, global_conf=global_conf)
    # set utils.FALLOCATE_RESERVE if desired
    reserve = int(conf.get('fallocate_reserve', 0))
    if reserve > 0:
        utils.FALLOCATE_RESERVE = reserve
    # redirect errors to logger and close stdio
    capture_stdio(logger)
    # If the strategy says not to fork at all, serve on that socket in the
    # current process and return when the server exits.
    no_fork_sock = strategy.no_fork_sock()
    if no_fork_sock:
        run_server(conf, logger, no_fork_sock, global_conf=global_conf)
        return 0
    def kill_children(*args):
        """Kills the entire process group."""
        logger.error('SIGTERM received')
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        running[0] = False
        os.killpg(0, signal.SIGTERM)
    def hup(*args):
        """Shuts down the server, but allows running requests to complete"""
        logger.error('SIGHUP received')
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        running[0] = False
    # running[0] is a mutable flag shared with the two handlers above.
    running = [True]
    signal.signal(signal.SIGTERM, kill_children)
    signal.signal(signal.SIGHUP, hup)
    while running[0]:
        for sock, sock_info in strategy.new_worker_socks():
            pid = os.fork()
            if pid == 0:
                # Child: restore default signal handling, drop privileges,
                # serve until the server stops, then exit run_wsgi cleanly.
                signal.signal(signal.SIGHUP, signal.SIG_DFL)
                signal.signal(signal.SIGTERM, signal.SIG_DFL)
                strategy.post_fork_hook()
                run_server(conf, logger, sock)
                strategy.log_sock_exit(sock, sock_info)
                return 0
            else:
                strategy.register_worker_start(sock, sock_info, pid)
        # The strategy may need to pay attention to something in addition to
        # child process exits (like new ports showing up in a ring).
        #
        # NOTE: a timeout value of None will just instantiate the Timeout
        # object and not actually schedule it, which is equivalent to no
        # timeout for the green_os.wait().
        loop_timeout = strategy.loop_timeout()
        with Timeout(loop_timeout, exception=False):
            try:
                pid, status = green_os.wait()
                if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                    strategy.register_worker_exit(pid)
            except OSError as err:
                if err.errno not in (errno.EINTR, errno.ECHILD):
                    raise
            except KeyboardInterrupt:
                logger.notice('User quit')
                running[0] = False
                break
    strategy.shutdown_sockets()
    logger.notice('Exited')
    return 0
class ConfigFileError(Exception):
    """Raised when the paste.deploy configuration cannot be loaded."""
    pass
class ConfigFilePortError(ConfigFileError):
    """A ConfigFileError subclass distinguishing port-related failures."""
    pass
def _initrp(conf_path, app_section, *args, **kwargs):
    """
    Load the WSGI config for ``app_section`` and set up a logger.

    :param conf_path: path to a paste.deploy style config file/directory
    :param app_section: app name from the conf file to load config for
    :returns: (conf, logger, log_name) tuple
    :raises ConfigFileError: if the config cannot be loaded
    """
    try:
        conf = appconfig(conf_path, name=app_section)
    except Exception as e:
        raise ConfigFileError("Error trying to load config from %s: %s" %
                              (conf_path, e))
    validate_configuration()
    # pre-configure logger
    log_name = conf.get('log_name', app_section)
    if 'logger' in kwargs:
        # a caller-supplied logger wins; pop it so it isn't passed along
        logger = kwargs.pop('logger')
    else:
        logger = get_logger(conf, log_name,
                            log_to_console=kwargs.pop('verbose', False),
                            log_route='wsgi')
    # disable fallocate if desired
    if config_true_value(conf.get('disable_fallocate', 'no')):
        disable_fallocate()
    monkey_patch_mimetools()
    return (conf, logger, log_name)
def init_request_processor(conf_path, app_section, *args, **kwargs):
    """
    Loads common settings from conf
    Sets the logger
    Loads the request processor
    :param conf_path: Path to paste.deploy style configuration file/directory
    :param app_section: App name from conf file to load config from
    :returns: the loaded application entry point
    :raises ConfigFileError: Exception is raised for config file error
    """
    # Shared setup lives in _initrp; this adds loading the actual WSGI app.
    (conf, logger, log_name) = _initrp(conf_path, app_section, *args, **kwargs)
    app = loadapp(conf_path, global_conf={'log_name': log_name})
    return (app, conf, logger, log_name)
class WSGIContext(object):
    """
    Gives a middleware filter a scope in which to capture the wsgi
    start_response data (status line, headers, exc_info) produced by the
    wrapped application.
    """
    def __init__(self, wsgi_app):
        self.app = wsgi_app

    def _start_response(self, status, headers, exc_info=None):
        """
        Stand-in for the WSGI start_response callable; records the response
        info instead of forwarding it to the remote client.
        """
        self._response_status = status
        self._response_headers = headers
        self._response_exc_info = exc_info

    def _app_call(self, env):
        """
        Call the wrapped app, guaranteeing start_response has fired by the
        time this returns.
        """
        self._response_status = None
        self._response_headers = None
        self._response_exc_info = None
        resp = self.app(env, self._start_response)
        if self._response_status is not None:
            # start_response already ran; hand the iterable back as-is
            return resp
        # Otherwise the app defers start_response until iteration begins:
        # pull one chunk to force it, then chain that chunk back in front.
        resp = iter(resp)
        try:
            head = next(resp)
        except StopIteration:
            return iter([])
        return CloseableChain([head], resp)

    def _get_status_int(self):
        """Return the integer HTTP status from the captured status line."""
        status_line = self._response_status
        return int(status_line.split(' ', 1)[0])

    def _response_header_value(self, key):
        """Return the value for header *key* (case-insensitive) or None."""
        wanted = key.lower()
        for header_name, header_value in self._response_headers:
            if header_name.lower() == wanted:
                return header_value
        return None
def make_env(env, method=None, path=None, agent='Swift', query_string=None,
             swift_source=None):
    """
    Build and return a brand-new WSGI environment based on ``env``.

    :param env: The WSGI environment to base the new environment on.
    :param method: The new REQUEST_METHOD or None to use the
                   original.
    :param path: The new path_info or none to use the original. path
                 should NOT be quoted. When building a url, a Webob
                 Request (in accordance with wsgi spec) will quote
                 env['PATH_INFO']. url += quote(environ['PATH_INFO'])
    :param query_string: The new query_string or none to use the original.
                         When building a url, a Webob Request will append
                         the query string directly to the url.
                         url += '?' + env['QUERY_STRING']
    :param agent: The HTTP user agent to use; default 'Swift'. You
                  can put %(orig)s in the agent to have it replaced
                  with the original env's HTTP_USER_AGENT, such as
                  '%(orig)s StaticWeb'. You also set agent to None to
                  use the original env's HTTP_USER_AGENT or '' to
                  have no HTTP_USER_AGENT.
    :param swift_source: Used to mark the request as originating out of
                         middleware. Will be logged in proxy logs.
    :returns: Fresh WSGI environment.
    """
    # Only this fixed set of keys is carried over from the original env.
    passthrough_keys = (
        'HTTP_USER_AGENT', 'HTTP_HOST', 'PATH_INFO',
        'QUERY_STRING', 'REMOTE_USER', 'REQUEST_METHOD',
        'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT',
        'HTTP_ORIGIN', 'HTTP_ACCESS_CONTROL_REQUEST_METHOD',
        'SERVER_PROTOCOL', 'swift.cache', 'swift.source',
        'swift.trans_id', 'swift.authorize_override',
        'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID',
        'HTTP_REFERER')
    newenv = {key: env[key] for key in passthrough_keys if key in env}
    if method:
        newenv['REQUEST_METHOD'] = method
    if path:
        newenv['PATH_INFO'] = path
        newenv['SCRIPT_NAME'] = ''
    if query_string is not None:
        newenv['QUERY_STRING'] = query_string
    if agent:
        newenv['HTTP_USER_AGENT'] = (
            agent % {'orig': env.get('HTTP_USER_AGENT', '')}).strip()
    elif agent == '' and 'HTTP_USER_AGENT' in newenv:
        # agent='' explicitly requests no user agent at all
        del newenv['HTTP_USER_AGENT']
    if swift_source:
        newenv['swift.source'] = swift_source
    # The subrequest gets its own (empty) input stream.
    newenv['wsgi.input'] = BytesIO()
    newenv.setdefault('SCRIPT_NAME', '')
    return newenv
def make_subrequest(env, method=None, path=None, body=None, headers=None,
                    agent='Swift', swift_source=None, make_env=make_env):
    """
    Create a new swob.Request based on the current env but with the
    parameters specified.

    :param env: The WSGI environment to base the new request on.
    :param method: HTTP method of new request; default is from
                   the original env.
    :param path: HTTP path of new request; default is from the
                 original env. path should be compatible with what you
                 would send to Request.blank. path should be quoted and it
                 can include a query string. for example:
                 '/a%20space?unicode_str%E8%AA%9E=y%20es'
    :param body: HTTP body of new request; empty by default.
    :param headers: Extra HTTP headers of new request; None by
                    default.
    :param agent: The HTTP user agent to use; default 'Swift'. You
                  can put %(orig)s in the agent to have it replaced
                  with the original env's HTTP_USER_AGENT, such as
                  '%(orig)s StaticWeb'. You also set agent to None to
                  use the original env's HTTP_USER_AGENT or '' to
                  have no HTTP_USER_AGENT.
    :param swift_source: Used to mark the request as originating out of
                         middleware. Will be logged in proxy logs.
    :param make_env: make_subrequest calls this make_env to help build the
                     swob.Request.
    :returns: Fresh swob.Request object.
    """
    path = path or ''
    query_string = None
    # A query string riding on the path is split off and passed separately.
    if path and '?' in path:
        path, query_string = path.split('?', 1)
    newenv = make_env(env, method, path=unquote(path), agent=agent,
                      query_string=query_string, swift_source=swift_source)
    blank_kwargs = {'environ': newenv, 'headers': headers or {}}
    # Only pass body when one was given so Request.blank's default applies.
    if body:
        blank_kwargs['body'] = body
    return Request.blank(path, **blank_kwargs)
def make_pre_authed_env(env, method=None, path=None, agent='Swift',
                        query_string=None, swift_source=None):
    """Same as :py:func:`make_env` but with preauthorization."""
    newenv = make_env(
        env, method=method, path=path, agent=agent, query_string=query_string,
        swift_source=swift_source)
    newenv.update({
        # authorization callback that always allows the request
        'swift.authorize': lambda req: None,
        'swift.authorize_override': True,
        'REMOTE_USER': '.wsgi.pre_authed',
    })
    return newenv
def make_pre_authed_request(env, method=None, path=None, body=None,
                            headers=None, agent='Swift', swift_source=None):
    """Same as :py:func:`make_subrequest` but with preauthorization."""
    # Delegates to make_subrequest, but builds the environ via
    # make_pre_authed_env so the resulting request bypasses authorization.
    return make_subrequest(
        env, method=method, path=path, body=body, headers=headers, agent=agent,
        swift_source=swift_source, make_env=make_pre_authed_env)
| aerwin3/swift | swift/common/wsgi.py | Python | apache-2.0 | 43,149 |
'''file: poly_voronoi.py
module: poly_voronoi
Contains classes for parsing .voronoi.* files into suitable list for
scenario polygon initialization.
'''
import os
import subprocess
try:
from siku import geocoords
from siku import element
from siku import geofiles
from siku import polygon
except ImportError:
import geocoords
import element
import geofiles
import polygon
import mathutils
import math
# Short local aliases for external helpers used throughout this module.
Vec = mathutils.Vector
latlon = geocoords.lonlat_deg_norm
norm = geocoords.norm_deg
Poly = polygon.Polygon
Quat = mathutils.Quaternion
#-----------------------------------------------------------------------------
def make_pairs( lst ):
    '''Utility: build a sorted list of (low, high) pairs from every
    two-element combination of the raw index list.'''
    pairs = [(min(a, b), max(a, b))
             for pos, a in enumerate(lst)
             for b in lst[pos + 1:]]
    pairs.sort()
    return pairs
def find_h( rc, r1, r2 ):
    '''Utility: altitude of the triangle (r1, r2, rc) dropped from rc onto
    the r1-r2 side; for a degenerate (zero-length) base, |rc - r1|.'''
    a = (rc - r1).length
    b = (rc - r2).length
    base = (r2 - r1).length
    if base == 0.0:
        return a
    # Heron's formula: area = sqrt(s(s-base)(s-a)(s-b)), h = 2*area/base
    s = 0.5 * (base + a + b)
    area = math.sqrt( s * (s - base) * (s - a) * (s - b) )
    return 2.0 * area / base
def find_delta( poly ):
    '''Utility: radius of the largest inscribed circle, taken as the
    smallest altitude from the polygon center C onto any edge.'''
    verts = poly.poly_xyz
    # verts[i-1] -> verts[i] walks every edge, including the closing one
    # (index -1 to 0).
    return min(find_h(poly.C, verts[i - 1], verts[i])
               for i in range(len(verts)))
#------------------------------------------------------------------------------
# Class Vert. Loads and contains vertices coords
#------------------------------------------------------------------------------
class Vert:
    '''Loads and contains vertex coordinates from a .voronoi.xyz file.
    '''
    def __init__( self, file = None ):
        '''Create an (optionally preloaded) vertex container.
        '''
        self.coords = []  # list of [x, y, z] floats, one entry per vertex
        if file:
            self.load( file )
        return

    def load( self, file ):
        '''Append vertices from a .voronoi.xyz file: one vertex per line,
        whitespace-separated floats.'''
        with open( file, 'r+') as inp:
            for line in inp:
                self.coords.append([float(token) for token in line.split()])
        return
#------------------------------------------------------------------------------
# Class Seq. Loads and contains vertices indexes
#------------------------------------------------------------------------------
class Seq:
    '''Loads and contains per-polygon vertex index lists from a
    .voronoi.xyzf file.
    '''
    def __init__( self, file = None ):
        '''Create an (optionally preloaded) index container.
        '''
        self.inds = []  # list of per-polygon lists of vertex indexes
        if file:
            self.load( file )
        return

    def load( self, file ):
        '''Append index lists from a .voronoi.xyzf file; the trailing token
        on each line is skipped.'''
        with open( file, 'r+') as inp:
            for line in inp:
                tokens = line.split()
                self.inds.append([int(tok) for tok in tokens[:-1]])
        return
#------------------------------------------------------------------------------
# Class PolyVor. Provides methods for parsing files into list polygon vertices
#------------------------------------------------------------------------------
class PolyVor:
    '''A class for retrieving list of lists of polygon vertices from
    .voronoi.* files
    '''
    # default filters for GMT borders forming: keep only vertices over
    # water (ocean) or over land respectively
    default_ocean = 'gmt gmtselect temp.lli -Dl -Nk/s/s/s/s > oceanf.lli'
    default_land = 'gmt gmtselect temp.lli -Dl -Ns/k/k/k/k > landf.lli'
    def __init__( self, coords_f = None, seq_f = None ):
        '''Init, try to load files

        :param coords_f: optional .voronoi.xyz file with vertex coords
        :param seq_f: optional .voronoi.xyzf file with vertex index lists
        '''
        self.coords = [] #list of lists of verts` coord: [ [ (lon, lat) ] ]
        self.verts = Vert() #list of vertices` coords (x, y, z)
        self.seq = Seq() #list of lists of verts` indexes
        self.links = []
        self.init_links = [] #PRIVATE
        self.init_inds = [] #PRIVATE
        self.delta = None #PRIVATE
        if coords_f and seq_f:
            self.load( coords_f, seq_f )
        return
    def load( self, coords_f, seq_f ):
        '''Loads vertices from files and prepares initial neighbor links.
        '''
        self.verts.load( coords_f )
        self.seq.load( seq_f )
        # ViP: for each vertex, the list of polygons that use it
        ViP = [ [] for i in range(len(self.verts.coords)) ]
        c = 0
        for l in self.seq.inds:
            self.coords.append(
                [ latlon( Vec( self.verts.coords[ i-1 ] ) ) for i in l ]
                )
            ###all next is for initial links
            for j in l:
                ViP[j-1].append(c) ## adding poly index to each point of it
            c += 1
        # generating initial links: polygons sharing a vertex are neighbors
        temp = { i:{} for i in range(len(self.coords)) }
        for v in ViP:
            for i in make_pairs( v ): #raw pairs of contacting polygons
                temp[i[0]][i[1]] = i[1] #temp =
                                        #= { poly1ID: { poly2ID: poly2ID } }
        p = Poly()
        R = []
        IL = []
        for i1 in temp:
            p.update( self.coords[i1][:] ) #calculating polygon props
            R.append( find_delta( p ) ) #accumulating radiuses of 'inscribed'
                                        #circles
            IL.append( ( p.C, temp[i1] ) ) # [ (Vec, { p2ID:p2ID } ) ]
        # delta: matching tolerance later used to re-identify polygons by
        # their center points
        self.delta = min( R )
        self.init_links = IL
        return
    def generate_links( self, Els ):
        '''Generates links (based upon init_links) from Els - list of
        Elements'''
        links = []
        reindex = {} #new indexes of old polygons (after all filters)
        for i in range(len(Els)):
            # center of the new (possibly rotated) polygon
            c = Quat(Els[i].q).to_matrix() * Vec( (0.0, 0.0, 1.0) )
            # searching for 'new' polygons matching 'old' ones by center
            for j in range(len(self.init_links)):
                l = self.init_links[j]
                if (c - l[0]).length < self.delta:
                    reindex[j] = i
        # NOTE(review): this loop indexes init_links with the NEW element
        # index i rather than an old polygon index; that is only consistent
        # when no polygons were filtered out before Els was built -- verify.
        for i in range(len(Els)):
            l = self.init_links[i]
            for o in l[1]:
                t = reindex.get( o, None )
                if t != None:
                    links.append( (min(i, t), max(i, t)) )
        links = list(set(links)) #getting unique pairs (for sure)
        links.sort()
        self.links = links
        return links
    def filter_( self, minlon, maxlon, minlat, maxlat ):
        '''Excludes all polygons, that have at least one vertex, located
        outside specified region.
        '''
        kept = []
        for poly_verts in self.coords:
            if all( minlon <= v[0] <= maxlon and minlat <= v[1] <= maxlat
                    for v in poly_verts ):
                kept.append( poly_verts )
        self.coords = kept
        return
    def mark_borders( self, Els, file_b, minlon = 0, maxlon = 360, \
                      minlat = -90, maxlat = 90 ):
        '''Marks all elements in 'Els' which contain at least one point from
        file_b as 'f_static'
        '''
        verts = geofiles.r_lonlat( file_b )
        # Keep only border points inside the region of interest. (Fixed:
        # the original removed from 'verts' while iterating it, which
        # silently skips the entry following each removal.)
        verts = [ v for v in verts
                  if minlon <= v[0] <= maxlon and minlat <= v[1] <= maxlat ]
        for e in Els:
            for v in verts:
                if e.does_contain( v ):
                    e.flag_state = element.Element.f_static
                    # each border point marks at most one element; the
                    # immediate 'break' makes this in-loop removal safe
                    verts.remove( v )
                    break
        return
    def get_border_by_gmt( self, land_filter=None, ocean_filter=None, \
                           vert_f='temp.lli', oceanf='oceanf.lli', \
                           landf='landf.lli' ):
        '''Returns list of border polygon indexes. Uses GMT for searching
        the polygons, those have both wet and dry vertices.
        '''
        # dump every vertex tagged with its polygon index
        tc = []
        for i in range( len ( self.coords ) ):
            for v in self.coords[i]:
                tc.append( [ v[0], v[1], i ] )
        geofiles.w_lonlati( vert_f, tc )
        if ocean_filter == None:
            ocean_filter = self.default_ocean
        subprocess.call( ocean_filter, shell=True ) # most time spent here
        if land_filter == None:
            land_filter = self.default_land
        subprocess.call( land_filter, shell=True ) # most time spent here
        # polygons having at least one wet vertex...
        # (Fixed: the original read 'landf' twice, making the intersection
        # below a no-op; one read must come from 'oceanf'.)
        tc = geofiles.r_lonlati( oceanf )
        I = { p[2]:p[2] for p in tc }
        # ...and at least one dry vertex
        tc = geofiles.r_lonlati( landf )
        II = { p[2]:p[2] for p in tc }
        # border polygons are exactly those present in both sets
        S = [ i for i in I if i in II ]
        os.remove( vert_f )
        os.remove( oceanf )
        os.remove( landf )
        return S
    def clear_the_land( self, ocean_filter=None, \
                        vert_f='temp.lli', filt_vert_f='oceanf.lli' ):
        '''Clears all polygons, that have no vertices located on water'''
        # dump every vertex tagged with its polygon index
        tc = []
        for i in range( len ( self.coords ) ):
            for v in self.coords[i]:
                tc.append( [ v[0], v[1], i ] )
        geofiles.w_lonlati( vert_f, tc )
        if ocean_filter == None:
            ocean_filter = self.default_ocean
        subprocess.call( ocean_filter, shell=True ) # most time spent here
        # indexes of polygons that still have at least one wet vertex
        tc = geofiles.r_lonlati( filt_vert_f )
        I = { p[2]:p[2] for p in tc }
        self.coords = [ self.coords[i] for i in I ]
        os.remove( vert_f )
        os.remove( filt_vert_f )
        return
# ------------------------------------------------------------------------

if __name__=='__main__':
    PV = PolyVor( 'mytest.voronoi.xyz', 'mytest.voronoi.xyzf' )
    # Fixed: the region-filtering method is named 'filter_' (trailing
    # underscore avoids shadowing the builtin); 'PV.filter' does not exist
    # and raised AttributeError.
    PV.filter_( 150, 250, 65, 85 )
    coords = PV.coords
| Atoku/siku | python/siku/poly_voronoi.py | Python | gpl-2.0 | 10,087 |
#!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Simple WebSocket client named echo_client just because of historical reason.
mod_pywebsocket directory must be in PYTHONPATH.
Example Usage:
# server setup
% cd $pywebsocket
% PYTHONPATH=$cwd/src python ./mod_pywebsocket/standalone.py -p 8880 \
-d $cwd/src/example
# run client
% PYTHONPATH=$cwd/src python ./src/example/echo_client.py -p 8880 \
-s localhost \
-o http://localhost -r /echo -m test
"""
from __future__ import absolute_import
from __future__ import print_function
import base64
import codecs
from hashlib import sha1
import logging
import argparse
import os
import random
import re
import six
import socket
import ssl
import struct
import sys
from mod_pywebsocket import common
from mod_pywebsocket.extensions import PerMessageDeflateExtensionProcessor
from mod_pywebsocket.extensions import _PerMessageDeflateFramer
from mod_pywebsocket.extensions import _parse_window_bits
from mod_pywebsocket.stream import Stream
from mod_pywebsocket.stream import StreamOptions
from mod_pywebsocket import util
_TIMEOUT_SEC = 10  # socket timeout, in seconds
_UNDEFINED_PORT = -1  # sentinel: no port given -- TODO confirm against usage
# Fixed header lines of the client's opening handshake request.
_UPGRADE_HEADER = 'Upgrade: websocket\r\n'
_CONNECTION_HEADER = 'Connection: Upgrade\r\n'
# Special message that tells the echo server to start closing handshake
_GOODBYE_MESSAGE = 'Goodbye'
_PROTOCOL_VERSION_HYBI13 = 'hybi13'  # protocol version identifier string
class ClientHandshakeError(Exception):
    """Raised when the WebSocket opening handshake with the server fails."""
    pass
def _build_method_line(resource):
return 'GET %s HTTP/1.1\r\n' % resource
def _origin_header(header, origin):
# 4.1 13. concatenation of the string "Origin:", a U+0020 SPACE character,
# and the /origin/ value, converted to ASCII lowercase, to /fields/.
return '%s: %s\r\n' % (header, origin.lower())
def _format_host_header(host, port, secure):
    """Build the Host header line, omitting the port when it equals the
    scheme's default (DEFAULT_WEB_SOCKET_PORT / _SECURE_PORT)."""
    # 4.1 9. Let /hostport/ be an empty string.
    # 4.1 10. Append the /host/ value, converted to ASCII lowercase, to
    # /hostport/
    hostport = host.lower()
    # 4.1 11. If /secure/ is false, and /port/ is not 80, or if /secure/
    # is true, and /port/ is not 443, then append a U+003A COLON character
    # (:) followed by the value of /port/, expressed as a base-ten integer,
    # to /hostport/
    if ((not secure and port != common.DEFAULT_WEB_SOCKET_PORT)
            or (secure and port != common.DEFAULT_WEB_SOCKET_SECURE_PORT)):
        hostport += ':' + str(port)
    # 4.1 12. concatenation of the string "Host:", a U+0020 SPACE
    # character, and /hostport/, to /fields/.
    return '%s: %s\r\n' % (common.HOST_HEADER, hostport)
def _receive_bytes(socket, length):
recv_bytes = []
remaining = length
while remaining > 0:
received_bytes = socket.recv(remaining)
if not received_bytes:
raise IOError(
'Connection closed before receiving requested length '
'(requested %d bytes but received only %d bytes)' %
(length, length - remaining))
recv_bytes.append(received_bytes)
remaining -= len(received_bytes)
return b''.join(recv_bytes)
def _get_mandatory_header(fields, name):
"""Gets the value of the header specified by name from fields.
This function expects that there's only one header with the specified name
in fields. Otherwise, raises an ClientHandshakeError.
"""
values = fields.get(name.lower())
if values is None or len(values) == 0:
raise ClientHandshakeError('%s header not found: %r' % (name, values))
if len(values) > 1:
raise ClientHandshakeError('Multiple %s headers found: %r' %
(name, values))
return values[0]
def _validate_mandatory_header(fields,
                               name,
                               expected_value,
                               case_sensitive=False):
    """Gets and validates the value of the header specified by name from
    fields.
    If expected_value is specified, compares expected value and actual value
    and raises an ClientHandshakeError on failure. You can specify case
    sensitiveness in this comparison by case_sensitive parameter. This function
    expects that there's only one header with the specified name in fields.
    Otherwise, raises an ClientHandshakeError.
    """
    value = _get_mandatory_header(fields, name)
    # Compare case-insensitively unless the caller requests otherwise.
    if ((case_sensitive and value != expected_value) or
        (not case_sensitive and value.lower() != expected_value.lower())):
        raise ClientHandshakeError(
            'Illegal value for header %s: %r (expected) vs %r (actual)' %
            (name, expected_value, value))
class _TLSSocket(object):
    """Wrapper for a TLS connection."""
    def __init__(self, raw_socket):
        self._logger = util.get_class_logger(self)
        # NOTE(review): ssl.wrap_socket was deprecated in Python 3.7 and
        # removed in 3.12, and performs no certificate validation by
        # default; ssl.SSLContext(...).wrap_socket is the replacement --
        # confirm before modernizing.
        self._tls_socket = ssl.wrap_socket(raw_socket)
        # Print cipher in use. Handshake is done on wrap_socket call.
        self._logger.info("Cipher: %s", self._tls_socket.cipher())
    def send(self, data):
        # Delegates to SSLSocket.write (returns bytes written).
        return self._tls_socket.write(data)
    def sendall(self, data):
        return self._tls_socket.sendall(data)
    def recv(self, size=-1):
        # Delegates to SSLSocket.read.
        return self._tls_socket.read(size)
    def close(self):
        return self._tls_socket.close()
    def getpeername(self):
        return self._tls_socket.getpeername()
class ClientHandshakeBase(object):
    """A base class for WebSocket opening handshake processors for each
    protocol version.

    Subclasses must set ``self._socket`` before the header-reading helpers
    below are used.
    """
    def __init__(self):
        self._logger = util.get_class_logger(self)
    def _read_fields(self):
        # 4.1 32. let /fields/ be a list of name-value pairs, initially empty.
        fields = {}
        while True:  # "Field"
            # 4.1 33. let /name/ and /value/ be empty byte arrays
            name = b''
            value = b''
            # 4.1 34. read /name/
            # (_read_name returns None on a bare CR, i.e. the blank line
            # that terminates the header section.)
            name = self._read_name()
            if name is None:
                break
            # 4.1 35. read spaces
            # TODO(tyoshino): Skip only one space as described in the spec.
            ch = self._skip_spaces()
            # 4.1 36. read /value/
            value = self._read_value(ch)
            # 4.1 37. read a byte from the server
            ch = _receive_bytes(self._socket, 1)
            if ch != b'\n':  # 0x0A
                raise ClientHandshakeError(
                    'Expected LF but found %r while reading value %r for '
                    'header %r' % (ch, value, name))
            self._logger.debug('Received %r header', name)
            # 4.1 38. append an entry to the /fields/ list that has the name
            # given by the string obtained by interpreting the /name/ byte
            # array as a UTF-8 stream and the value given by the string
            # obtained by interpreting the /value/ byte array as a UTF-8 byte
            # stream.
            fields.setdefault(name.decode('UTF-8'),
                              []).append(value.decode('UTF-8'))
            # 4.1 39. return to the "Field" step above
        return fields
    def _read_name(self):
        # 4.1 33. let /name/ be empty byte arrays
        name = b''
        while True:
            # 4.1 34. read a byte from the server
            ch = _receive_bytes(self._socket, 1)
            if ch == b'\r':  # 0x0D
                # CR before any ':' marks the end of the header section.
                return None
            elif ch == b'\n':  # 0x0A
                raise ClientHandshakeError(
                    'Unexpected LF when reading header name %r' % name)
            elif ch == b':':  # 0x3A
                # header names are compared case-insensitively downstream
                return name.lower()
            else:
                name += ch
    def _skip_spaces(self):
        # 4.1 35. read a byte from the server
        while True:
            ch = _receive_bytes(self._socket, 1)
            if ch == b' ':  # 0x20
                continue
            return ch
    def _read_value(self, ch):
        # 4.1 33. let /value/ be empty byte arrays
        value = b''
        # 4.1 36. read a byte from server.
        while True:
            if ch == b'\r':  # 0x0D
                # The CR ends the value; the caller consumes the LF.
                return value
            elif ch == b'\n':  # 0x0A
                raise ClientHandshakeError(
                    'Unexpected LF when reading header value %r' % value)
            else:
                value += ch
            ch = _receive_bytes(self._socket, 1)
def _get_permessage_deflate_framer(extension_response):
    """Validate the response and return a framer object using the parameters in
    the response. This method doesn't accept the server_.* parameters.
    """
    client_max_window_bits = None
    client_no_context_takeover = None
    client_max_window_bits_name = (
        PerMessageDeflateExtensionProcessor._CLIENT_MAX_WINDOW_BITS_PARAM)
    client_no_context_takeover_name = (
        PerMessageDeflateExtensionProcessor._CLIENT_NO_CONTEXT_TAKEOVER_PARAM)
    # We didn't send any server_.* parameter.
    # Handle those parameters as invalid if found in the response.
    for param_name, param_value in extension_response.get_parameters():
        if param_name == client_max_window_bits_name:
            # duplicated parameters are a protocol violation
            if client_max_window_bits is not None:
                raise ClientHandshakeError('Multiple %s found' %
                                           client_max_window_bits_name)
            parsed_value = _parse_window_bits(param_value)
            if parsed_value is None:
                raise ClientHandshakeError(
                    'Bad %s: %r' % (client_max_window_bits_name, param_value))
            client_max_window_bits = parsed_value
        elif param_name == client_no_context_takeover_name:
            if client_no_context_takeover is not None:
                raise ClientHandshakeError('Multiple %s found' %
                                           client_no_context_takeover_name)
            # this parameter is a bare flag; it must not carry a value
            if param_value is not None:
                raise ClientHandshakeError(
                    'Bad %s: Has value %r' %
                    (client_no_context_takeover_name, param_value))
            client_no_context_takeover = True
    # absence of the flag means context takeover stays enabled
    if client_no_context_takeover is None:
        client_no_context_takeover = False
    return _PerMessageDeflateFramer(client_max_window_bits,
                                    client_no_context_takeover)
class ClientHandshakeProcessor(ClientHandshakeBase):
    """WebSocket opening handshake processor for the client side
    (HyBi 13 / RFC 6455 style).
    """
    def __init__(self, socket, options):
        super(ClientHandshakeProcessor, self).__init__()
        self._socket = socket
        self._options = options
        self._logger = util.get_class_logger(self)

    def handshake(self):
        """Performs opening handshake on the specified socket.
        Raises:
            ClientHandshakeError: handshake failed.
        """
        request_line = _build_method_line(self._options.resource)
        self._logger.debug('Client\'s opening handshake Request-Line: %r',
                           request_line)
        self._socket.sendall(request_line.encode('UTF-8'))
        fields = []
        fields.append(
            _format_host_header(self._options.server_host,
                                self._options.server_port,
                                self._options.use_tls))
        fields.append(_UPGRADE_HEADER)
        fields.append(_CONNECTION_HEADER)
        if self._options.origin is not None:
            fields.append(
                _origin_header(common.ORIGIN_HEADER, self._options.origin))
        # Sec-WebSocket-Key: 16 random bytes, base64-encoded.
        original_key = os.urandom(16)
        self._key = base64.b64encode(original_key)
        self._logger.debug('%s: %r (%s)', common.SEC_WEBSOCKET_KEY_HEADER,
                           self._key, util.hexify(original_key))
        fields.append(
            '%s: %s\r\n' %
            (common.SEC_WEBSOCKET_KEY_HEADER, self._key.decode('UTF-8')))
        fields.append(
            '%s: %d\r\n' %
            (common.SEC_WEBSOCKET_VERSION_HEADER, common.VERSION_HYBI_LATEST))
        extensions_to_request = []
        if self._options.use_permessage_deflate:
            extension = common.ExtensionParameter(
                common.PERMESSAGE_DEFLATE_EXTENSION)
            # Accept the client_max_window_bits extension parameter by default.
            extension.add_parameter(
                PerMessageDeflateExtensionProcessor.
                _CLIENT_MAX_WINDOW_BITS_PARAM, None)
            extensions_to_request.append(extension)
        if len(extensions_to_request) != 0:
            fields.append('%s: %s\r\n' %
                          (common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
                           common.format_extensions(extensions_to_request)))
        for field in fields:
            self._socket.sendall(field.encode('UTF-8'))
        # blank line terminates the request headers
        self._socket.sendall(b'\r\n')
        self._logger.debug('Sent client\'s opening handshake headers: %r',
                           fields)
        self._logger.debug('Start reading Status-Line')
        status_line = b''
        while True:
            ch = _receive_bytes(self._socket, 1)
            status_line += ch
            if ch == b'\n':
                break
        # Fixed: raw bytes literal; the original non-raw b'...\.' relied on
        # an invalid escape sequence being passed through verbatim.
        m = re.match(br'HTTP/\d+\.\d+ (\d\d\d) .*\r\n', status_line)
        if m is None:
            raise ClientHandshakeError('Wrong status line format: %r' %
                                       status_line)
        status_code = m.group(1)
        if status_code != b'101':
            self._logger.debug(
                'Unexpected status code %s with following headers: %r',
                status_code, self._read_fields())
            raise ClientHandshakeError(
                'Expected HTTP status code 101 but found %r' % status_code)
        self._logger.debug('Received valid Status-Line')
        self._logger.debug('Start reading headers until we see an empty line')
        fields = self._read_fields()
        ch = _receive_bytes(self._socket, 1)
        if ch != b'\n':  # 0x0A
            # Fixed: the original message referenced undefined locals
            # ('value', 'name') and would have raised NameError instead.
            raise ClientHandshakeError(
                'Expected LF after the header fields but found %r' % ch)
        self._logger.debug('Received an empty line')
        self._logger.debug('Server\'s opening handshake headers: %r', fields)
        _validate_mandatory_header(fields, common.UPGRADE_HEADER,
                                   common.WEBSOCKET_UPGRADE_TYPE, False)
        _validate_mandatory_header(fields, common.CONNECTION_HEADER,
                                   common.UPGRADE_CONNECTION_TYPE, False)
        accept = _get_mandatory_header(fields,
                                       common.SEC_WEBSOCKET_ACCEPT_HEADER)
        # Validate the challenge response.
        # Fixed: raise ClientHandshakeError ('HandshakeError' was an
        # undefined name); also catch ValueError, which (as binascii.Error)
        # is what Python 3's b64decode raises for malformed input.
        try:
            binary_accept = base64.b64decode(accept)
        except (TypeError, ValueError):
            raise ClientHandshakeError(
                'Illegal value for header %s: %r' %
                (common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
        if len(binary_accept) != 20:
            raise ClientHandshakeError(
                'Decoded value of %s is not 20-byte long' %
                common.SEC_WEBSOCKET_ACCEPT_HEADER)
        self._logger.debug('Response for challenge : %r (%s)', accept,
                           util.hexify(binary_accept))
        # Expected value: SHA-1 of key + magic GUID, base64-encoded.
        binary_expected_accept = sha1(self._key +
                                      common.WEBSOCKET_ACCEPT_UUID).digest()
        expected_accept = base64.b64encode(binary_expected_accept)
        self._logger.debug('Expected response for challenge: %r (%s)',
                           expected_accept,
                           util.hexify(binary_expected_accept))
        if accept != expected_accept.decode('UTF-8'):
            raise ClientHandshakeError(
                'Invalid %s header: %r (expected: %s)' %
                (common.SEC_WEBSOCKET_ACCEPT_HEADER, accept, expected_accept))
        permessage_deflate_accepted = False
        extensions_header = fields.get(
            common.SEC_WEBSOCKET_EXTENSIONS_HEADER.lower())
        accepted_extensions = []
        if extensions_header is not None and len(extensions_header) != 0:
            accepted_extensions = common.parse_extensions(extensions_header[0])
        for extension in accepted_extensions:
            extension_name = extension.name()
            if (extension_name == common.PERMESSAGE_DEFLATE_EXTENSION
                    and self._options.use_permessage_deflate):
                permessage_deflate_accepted = True
                framer = _get_permessage_deflate_framer(extension)
                framer.set_compress_outgoing_enabled(True)
                self._options.use_permessage_deflate = framer
                continue
            # any extension we didn't ask for is a handshake failure
            raise ClientHandshakeError('Unexpected extension %r' %
                                       extension_name)
        if (self._options.use_permessage_deflate
                and not permessage_deflate_accepted):
            raise ClientHandshakeError(
                'Requested %s, but the server rejected it' %
                common.PERMESSAGE_DEFLATE_EXTENSION)
        # TODO(tyoshino): Handle Sec-WebSocket-Protocol
        # TODO(tyoshino): Handle Cookie, etc.
class ClientConnection(object):
    """Adapts a plain socket to the mp_conn interface that the framing
    layer expects.

    Only the operations actually used are exposed: blocking write/read
    and access to the peer address.
    """

    def __init__(self, socket):
        self._socket = socket

    def write(self, data):
        # sendall blocks until every byte has been handed to the kernel.
        self._socket.sendall(data)

    def read(self, n):
        # Mirrors socket.recv semantics: may return fewer than n bytes.
        return self._socket.recv(n)

    def get_remote_addr(self):
        return self._socket.getpeername()

    remote_addr = property(get_remote_addr)
class ClientRequest(object):
    """Minimal stand-in for an mp_request object.

    Lets a bare socket be passed to functions that expect a request,
    by exposing the attributes they read (connection, ws_version).
    """

    def __init__(self, socket):
        # Advertise the latest HyBi protocol version.
        self.ws_version = common.VERSION_HYBI_LATEST
        self.connection = ClientConnection(socket)
        self._socket = socket
        self._logger = util.get_class_logger(self)
class EchoClient(object):
    """WebSocket echo client."""

    def __init__(self, options):
        # options: parsed argparse namespace (see main()); must provide
        # server_host, server_port, socket_timeout, use_tls, message,
        # verbose and use_permessage_deflate.
        self._options = options
        self._socket = None
        self._logger = util.get_class_logger(self)

    def run(self):
        """Run the client.

        Shake hands and then repeat sending message and receiving its echo.
        """
        self._socket = socket.socket()
        self._socket.settimeout(self._options.socket_timeout)
        try:
            self._socket.connect(
                (self._options.server_host, self._options.server_port))
            if self._options.use_tls:
                self._socket = _TLSSocket(self._socket)
            self._handshake = ClientHandshakeProcessor(self._socket,
                                                       self._options)
            self._handshake.handshake()
            self._logger.info('Connection established')
            request = ClientRequest(self._socket)
            stream_option = StreamOptions()
            # The client side must mask outgoing frames and receives
            # unmasked frames from the server (RFC 6455 client rules).
            stream_option.mask_send = True
            stream_option.unmask_receive = False
            # use_permessage_deflate is False or a framer object installed
            # by the handshake processor when the server accepted the
            # extension.
            if self._options.use_permessage_deflate is not False:
                framer = self._options.use_permessage_deflate
                framer.setup_stream_options(stream_option)
            self._stream = Stream(request, stream_option)
            # Send each comma-separated message and wait for its echo.
            for line in self._options.message.split(','):
                self._stream.send_message(line)
                if self._options.verbose:
                    print('Send: %s' % line)
                try:
                    received = self._stream.receive_message()
                    if self._options.verbose:
                        print('Recv: %s' % received)
                except Exception as e:
                    if self._options.verbose:
                        print('Error: %s' % e)
                    raise
            self._do_closing_handshake()
        finally:
            # Always release the socket, even on handshake/stream errors.
            self._socket.close()

    def _do_closing_handshake(self):
        """Perform closing handshake using the specified closing frame."""
        if self._options.message.split(',')[-1] == _GOODBYE_MESSAGE:
            # requested server initiated closing handshake, so
            # expecting closing handshake message from server.
            self._logger.info('Wait for server-initiated closing handshake')
            message = self._stream.receive_message()
            if message is None:
                print('Recv close')
                print('Send ack')
                self._logger.info('Received closing handshake and sent ack')
                return
        # Client-initiated close: send close, then wait for the ack.
        print('Send close')
        self._stream.close_connection()
        self._logger.info('Sent closing handshake')
        print('Recv ack')
        self._logger.info('Received ack')
def main():
    """Parse command-line flags and run the echo client once."""
    # On Python 2, stdout is a byte stream; wrap it so the UTF-8 payload
    # (the default message contains Japanese characters) prints cleanly.
    if six.PY2:
        sys.stdout = codecs.getwriter('utf-8')(sys.stdout)

    parser = argparse.ArgumentParser()
    # We accept --command_line_flag style flags which is the same as Google
    # gflags in addition to common --command-line-flag style flags.
    parser.add_argument('-s',
                        '--server-host',
                        '--server_host',
                        dest='server_host',
                        type=six.text_type,
                        default='localhost',
                        help='server host')
    parser.add_argument('-p',
                        '--server-port',
                        '--server_port',
                        dest='server_port',
                        type=int,
                        default=_UNDEFINED_PORT,
                        help='server port')
    parser.add_argument('-o',
                        '--origin',
                        dest='origin',
                        type=six.text_type,
                        default=None,
                        help='origin')
    parser.add_argument('-r',
                        '--resource',
                        dest='resource',
                        type=six.text_type,
                        default='/echo',
                        help='resource path')
    parser.add_argument(
        '-m',
        '--message',
        dest='message',
        type=six.text_type,
        default=u'Hello,\u65e5\u672c',
        help=('comma-separated messages to send. '
              '%s will force close the connection from server.' %
              _GOODBYE_MESSAGE))
    parser.add_argument('-q',
                        '--quiet',
                        dest='verbose',
                        action='store_false',
                        default=True,
                        help='suppress messages')
    parser.add_argument('-t',
                        '--tls',
                        dest='use_tls',
                        action='store_true',
                        default=False,
                        help='use TLS (wss://).')
    parser.add_argument('-k',
                        '--socket-timeout',
                        '--socket_timeout',
                        dest='socket_timeout',
                        type=int,
                        default=_TIMEOUT_SEC,
                        help='Timeout(sec) for sockets')
    parser.add_argument('--use-permessage-deflate',
                        '--use_permessage_deflate',
                        dest='use_permessage_deflate',
                        action='store_true',
                        default=False,
                        help='Use the permessage-deflate extension.')
    parser.add_argument('--log-level',
                        '--log_level',
                        type=six.text_type,
                        dest='log_level',
                        default='warn',
                        choices=['debug', 'info', 'warn', 'error', 'critical'],
                        help='Log level.')

    options = parser.parse_args()

    # getLevelName maps an upper-cased level name back to its numeric value.
    logging.basicConfig(level=logging.getLevelName(options.log_level.upper()))

    # Default port number depends on whether TLS is used.
    if options.server_port == _UNDEFINED_PORT:
        if options.use_tls:
            options.server_port = common.DEFAULT_WEB_SOCKET_SECURE_PORT
        else:
            options.server_port = common.DEFAULT_WEB_SOCKET_PORT

    EchoClient(options).run()
if __name__ == '__main__':
main()
# vi:sts=4 sw=4 et
| asajeffrey/servo | tests/wpt/web-platform-tests/tools/third_party/pywebsocket3/example/echo_client.py | Python | mpl-2.0 | 25,626 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
class TestSumOp1(OpTest):
    """cumsum over the last axis of a 3-D input."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2}
        data = np.random.random((5, 6, 10)).astype("float64")
        self.inputs = {'X': data}
        self.outputs = {'Out': data.cumsum(axis=2)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
class TestSumOp2(OpTest):
    """Reverse cumsum along a negative axis index."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': -1, 'reverse': True}
        data = np.random.random((5, 6, 10)).astype("float64")
        self.inputs = {'X': data}
        # Reverse, accumulate, reverse back — same values as the
        # double np.flip formulation.
        self.outputs = {'Out': data[:, :, ::-1].cumsum(axis=2)[:, :, ::-1]}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
class TestSumOp3(OpTest):
    """cumsum over the middle axis of a 3-D input."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 1}
        data = np.random.random((5, 6, 10)).astype("float64")
        self.inputs = {'X': data}
        self.outputs = {'Out': data.cumsum(axis=1)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
class TestSumOp4(OpTest):
    """cumsum over the first axis of a 3-D input."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 0}
        data = np.random.random((5, 6, 10)).astype("float64")
        self.inputs = {'X': data}
        self.outputs = {'Out': data.cumsum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
class TestSumOp5(OpTest):
    """cumsum with no explicit axis attribute on a 2-D input."""

    def setUp(self):
        self.op_type = "cumsum"
        data = np.random.random((5, 6)).astype("float64")
        self.inputs = {'X': data}
        self.outputs = {'Out': data.cumsum(axis=1)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
class TestSumOp7(OpTest):
    """cumsum on a 1-D input."""

    def setUp(self):
        self.op_type = "cumsum"
        data = np.random.random((6, )).astype("float64")
        self.inputs = {'X': data}
        self.outputs = {'Out': data.cumsum(axis=0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
class TestSumOp8(OpTest):
    """Exclusive cumsum: output is the running sum shifted right by one."""

    def setUp(self):
        self.op_type = "cumsum"
        self.attrs = {'axis': 2, "exclusive": True}
        data = np.random.random((5, 6, 3)).astype("float64")
        self.inputs = {'X': data}
        leading_zeros = np.zeros((5, 6, 1), dtype=np.float64)
        shifted = data[:, :, :-1].cumsum(axis=2)
        self.outputs = {'Out': np.concatenate((leading_zeros, shifted), axis=2)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
if __name__ == '__main__':
unittest.main()
| Canpio/Paddle | python/paddle/fluid/tests/unittests/test_cumsum_op.py | Python | apache-2.0 | 3,630 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import os
import time
import logging
import json
import hashlib
import itertools
import random
import numpy
import StringIO
import shutil
import tempfile
import copy
import pprint
from operator import itemgetter
from nupic.data import dictutils
from nupic.frameworks.opf import opfhelpers
from nupic.frameworks.opf.opfutils import InferenceType
from nupic.support import clippedObj
from nupic.support.serializationutils import sortedJSONDumpS
from nupic.support.configuration import Configuration
from nupic.support.errorcodes import ErrorCodes
from nupic.database.ClientJobsDAO import (
ClientJobsDAO, InvalidConnectionException)
from nupic.swarming.utils import (runModelGivenBaseAndParams,
runDummyModel)
from nupic.swarming.permutationhelpers import *
from nupic.frameworks.opf.exp_generator.ExpGenerator import expGenerator
def _flattenKeys(keys):
return '|'.join(keys)
class SwarmTerminator(object):
  """Class that records the performance of swarms in a sprint and makes
  decisions about which swarms should stop running. This is a useful
  optimization that identifies field combinations that no longer need to be
  run.
  """
  MATURITY_WINDOW = None
  MAX_GENERATIONS = None

  # Tolerance schedule indexed by generation: a swarm whose score is more than
  # (1 + milestone) times the generation's best score is terminated.
  _DEFAULT_MILESTONES = [1.0 / (x + 1) for x in range(12)]

  def __init__(self, milestones=None, logLevel=None):
    """
    Parameters:
    ---------------------------------------------------------------------
    milestones: optional override for the per-generation tolerance schedule;
                defaults to a deep copy of _DEFAULT_MILESTONES.
    logLevel:   unused; retained for backward compatibility.
    """
    # Set class constants.
    self.MATURITY_WINDOW = int(Configuration.get(
                                      "nupic.hypersearch.swarmMaturityWindow"))
    self.MAX_GENERATIONS = int(Configuration.get(
                                      "nupic.hypersearch.swarmMaxGenerations"))
    if self.MAX_GENERATIONS < 0:
      self.MAX_GENERATIONS = None

    # Set up instance variables.
    self._isTerminationEnabled = bool(int(Configuration.get(
                              'nupic.hypersearch.enableSwarmTermination')))

    # swarmId -> list of the best (lowest) score seen up to each generation.
    self.swarmBests = dict()
    # swarmId -> list of the raw score reported at each generation.
    self.swarmScores = dict()
    # Swarms that have already been told to stop.
    self.terminatedSwarms = set([])

    self._logger = logging.getLogger(".".join(
        ['com.numenta', self.__class__.__module__, self.__class__.__name__]))

    if milestones is not None:
      self.milestones = milestones
    else:
      self.milestones = copy.deepcopy(self._DEFAULT_MILESTONES)

  def recordDataPoint(self, swarmId, generation, errScore):
    """Record the best score for a swarm's generation index (x).

    Parameters:
    ---------------------------------------------------------------------
    swarmId:    swarm reporting this result
    generation: 0-based generation index; must be exactly one past the last
                generation recorded for this swarm
    errScore:   best error score of this generation (lower is better)

    retval:     list of swarmIds to terminate (may be empty; a swarm that
                keeps reporting after termination can appear again).
    """
    terminatedSwarms = []

    # Append score to existing swarm.
    if swarmId in self.swarmScores:
      entry = self.swarmScores[swarmId]
      assert (len(entry) == generation)
      entry.append(errScore)

      entry = self.swarmBests[swarmId]
      entry.append(min(errScore, entry[-1]))

      assert (len(self.swarmBests[swarmId]) == len(self.swarmScores[swarmId]))
    else:
      # Create list of scores for a new swarm
      assert (generation == 0)
      self.swarmScores[swarmId] = [errScore]
      self.swarmBests[swarmId] = [errScore]

    # If the current swarm hasn't completed at least MATURITY_WINDOW
    # generations, it should not be a candidate for maturation or termination.
    # This prevents the initial allocation of particles in PSO from killing off
    # a field combination too early.
    if generation + 1 < self.MATURITY_WINDOW:
      return terminatedSwarms

    # If the swarm has completed more than MAX_GENERATIONS, it should be marked
    # as mature, regardless of how its value is changing.
    if self.MAX_GENERATIONS is not None and generation > self.MAX_GENERATIONS:
      self._logger.info(
          'Swarm %s has matured (more than %d generations). Stopping' %
          (swarmId, self.MAX_GENERATIONS))
      terminatedSwarms.append(swarmId)

    if self._isTerminationEnabled:
      terminatedSwarms.extend(self._getTerminatedSwarms(generation))

    # If there is no change in the swarm's best score for MATURITY_WINDOW
    # generations, mark it dead.
    cumulativeBestScores = self.swarmBests[swarmId]
    if cumulativeBestScores[-1] == cumulativeBestScores[-self.MATURITY_WINDOW]:
      self._logger.info('Swarm %s has matured (no change in %d generations).'
                        'Stopping...' % (swarmId, self.MATURITY_WINDOW))
      terminatedSwarms.append(swarmId)

    self.terminatedSwarms = self.terminatedSwarms.union(terminatedSwarms)
    return terminatedSwarms

  def numDataPoints(self, swarmId):
    """Return how many generations have been recorded for the given swarm."""
    if swarmId in self.swarmScores:
      return len(self.swarmScores[swarmId])
    else:
      return 0

  def _getTerminatedSwarms(self, generation):
    """Return swarms whose score at this generation falls too far behind the
    best swarm, per the milestone tolerance schedule.

    retval: list of swarmIds to terminate (possibly empty, never None).
    """
    terminatedSwarms = []
    generationScores = dict()
    # dict.items() instead of iteritems() so this also runs on Python 3;
    # behavior is identical.
    for swarm, scores in self.swarmScores.items():
      if len(scores) > generation and swarm not in self.terminatedSwarms:
        generationScores[swarm] = scores[generation]

    if len(generationScores) == 0:
      # BUG FIX: this used to be a bare `return` (None), which made the
      # terminatedSwarms.extend(...) call in recordDataPoint() raise a
      # TypeError once every other swarm had already been terminated.
      return terminatedSwarms

    bestScore = min(generationScores.values())
    # BUG FIX: clamp the index so generations past the end of the schedule
    # reuse the last (tightest) tolerance instead of raising IndexError.
    tolerance = self.milestones[min(generation, len(self.milestones) - 1)]

    for swarm, score in generationScores.items():
      if score > (1 + tolerance) * bestScore:
        self._logger.info('Swarm %s is doing poorly at generation %d.\n'
                          'Current Score:%s \n'
                          'Best Score:%s \n'
                          'Tolerance:%s. Stopping...',
                          swarm, generation, score, bestScore, tolerance)
        terminatedSwarms.append(swarm)
    return terminatedSwarms
class ResultsDB(object):
"""This class holds all the information we have accumulated on completed
models, which particles were used, etc.
When we get updated results sent to us (via recordModelProgress), we
record it here for access later by various functions in this module.
"""
  def __init__(self, hsObj):
    """ Instantiate our results database

    Parameters:
    --------------------------------------------------------------------
    hsObj:        Reference to the HypersearchV2 instance
    """
    self._hsObj = hsObj

    # This list holds all the results we have so far on every model. In
    # addition, we maintain multiple other data structures which provide
    # faster access into portions of this list
    self._allResults = []

    # Models that completed with errors and all completed.
    # These are used to determine when we should abort because of too many
    # errors
    self._errModels = set()
    self._numErrModels = 0
    self._completedModels = set()
    self._numCompletedModels = 0

    # Map of the model ID to index of result in _allResults
    self._modelIDToIdx = dict()

    # The global best result on the optimize metric so far, and the model ID
    self._bestResult = numpy.inf
    self._bestModelID = None

    # The top level dict has the swarmId as the key. Each value is a list,
    # indexed by genIdx, of (modelId, errScore) tuples (see update(), which
    # appends to and indexes these lists by generation).
    self._swarmBestOverall = dict()

    # For each swarm, we keep track of how many particles we have per
    # generation. The key is the swarmId, the value is a list of the number
    # of particles at each generation.
    self._swarmNumParticlesPerGeneration = dict()

    # The following variables are used to support the
    # getMaturedSwarmGenerations() call.
    #
    # The _modifiedSwarmGens set contains the set of (swarmId, genIdx) tuples
    # that have had results reported to them since the last time
    # getMaturedSwarmGenerations() was called.
    #
    # The maturedSwarmGens contains (swarmId,genIdx) tuples, one for each
    # swarm generation index which we have already detected has matured. This
    # insures that if by chance we get a rogue report from a model in a swarm
    # generation index which we have already assumed was matured that we won't
    # report on it again.
    self._modifiedSwarmGens = set()
    self._maturedSwarmGens = set()

    # For each particle, we keep track of it's best score (across all
    # generations) and the position it was at when it got that score. The keys
    # in this dict are the particleId, the values are (bestResult, position),
    # where position is a dict with varName:position items in it.
    self._particleBest = dict()

    # For each particle, we keep track of it's latest generation index.
    self._particleLatestGenIdx = dict()

    # For each swarm, we keep track of which models are in it. The key
    # is the swarmId, the value is a list of indexes into self._allResults.
    self._swarmIdToIndexes = dict()

    # ParamsHash to index mapping
    self._paramsHashToIndexes = dict()
  def update(self, modelID, modelParams, modelParamsHash, metricResult,
             completed, completionReason, matured, numRecords):
    """ Insert a new entry or update an existing one. If this is an update
    of an existing entry, then modelParams will be None

    Parameters:
    --------------------------------------------------------------------
    modelID:     globally unique modelID of this model
    modelParams: params dict for this model, or None if this is just an update
                 of a model that it already previously reported on.
                 See the comments for the createModels() method for
                 a description of this dict.
    modelParamsHash:  hash of the modelParams dict, generated by the worker
                 that put it into the model database.
    metricResult: value on the optimizeMetric for this model.
                 May be None if we have no results yet.
    completed:   True if the model has completed evaluation, False if it
                 is still running (and these are online results)
    completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
    matured:     True if this model has matured
    numRecords:  Number of records that have been processed so far by this
                 model.

    retval: Canonicalized result on the optimize metric (lower is always
            better; numpy.inf when no usable result exists yet).
    """
    # The modelParamsHash must always be provided - it can change after a
    # model is inserted into the models table if it got detected as an
    # orphan
    assert (modelParamsHash is not None)

    # We consider a model metricResult as "final" if it has completed or
    # matured. By default, assume anything that has completed has matured
    if completed:
      matured = True

    # Get the canonicalized optimize metric results. For this metric, lower
    # is always better
    if metricResult is not None and matured and \
                   completionReason in [ClientJobsDAO.CMPL_REASON_EOF,
                                        ClientJobsDAO.CMPL_REASON_STOPPED]:
      # Canonicalize the error score so that lower is better
      if self._hsObj._maximize:
        errScore = -1 * metricResult
      else:
        errScore = metricResult

      if errScore < self._bestResult:
        self._bestResult = errScore
        self._bestModelID = modelID
        self._hsObj.logger.info("New best model after %d evaluations: errScore "
              "%g on model %s" % (len(self._allResults), self._bestResult,
                                  self._bestModelID))

    else:
      # No usable result yet: +inf makes this model lose every comparison.
      errScore = numpy.inf

    # If this model completed with an unacceptable completion reason, set the
    # errScore to infinite and essentially make this model invisible to
    # further queries
    if completed and completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN]:
      errScore = numpy.inf
      hidden = True
    else:
      hidden = False

    # Update our set of erred models and completed models. These are used
    # to determine if we should abort the search because of too many errors
    if completed:
      self._completedModels.add(modelID)
      self._numCompletedModels = len(self._completedModels)
      if completionReason == ClientJobsDAO.CMPL_REASON_ERROR:
        self._errModels.add(modelID)
        self._numErrModels = len(self._errModels)

    # Are we creating a new entry?
    wasHidden = False
    if modelID not in self._modelIDToIdx:
      assert (modelParams is not None)
      entry = dict(modelID=modelID, modelParams=modelParams,
                   modelParamsHash=modelParamsHash,
                   errScore=errScore, completed=completed,
                   matured=matured, numRecords=numRecords, hidden=hidden)
      self._allResults.append(entry)
      entryIdx = len(self._allResults) - 1
      self._modelIDToIdx[modelID] = entryIdx

      self._paramsHashToIndexes[modelParamsHash] = entryIdx

      swarmId = modelParams['particleState']['swarmId']
      if not hidden:
        # Update the list of particles in each swarm
        if swarmId in self._swarmIdToIndexes:
          self._swarmIdToIndexes[swarmId].append(entryIdx)
        else:
          self._swarmIdToIndexes[swarmId] = [entryIdx]

        # Update number of particles at each generation in this swarm
        genIdx = modelParams['particleState']['genIdx']
        numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [0])
        # Grow the per-generation counter list as needed before incrementing.
        while genIdx >= len(numPsEntry):
          numPsEntry.append(0)
        numPsEntry[genIdx] += 1
        self._swarmNumParticlesPerGeneration[swarmId] = numPsEntry

    # Replacing an existing one
    else:
      entryIdx = self._modelIDToIdx.get(modelID, None)
      assert (entryIdx is not None)
      entry = self._allResults[entryIdx]
      wasHidden = entry['hidden']

      # If the paramsHash changed, note that. This can happen for orphaned
      # models
      if entry['modelParamsHash'] != modelParamsHash:

        self._paramsHashToIndexes.pop(entry['modelParamsHash'])
        self._paramsHashToIndexes[modelParamsHash] = entryIdx
        entry['modelParamsHash'] = modelParamsHash

      # Get the model params, swarmId, and genIdx
      modelParams = entry['modelParams']
      swarmId = modelParams['particleState']['swarmId']
      genIdx = modelParams['particleState']['genIdx']

      # If this particle just became hidden, remove it from our swarm counts
      if hidden and not wasHidden:
        assert (entryIdx in self._swarmIdToIndexes[swarmId])
        self._swarmIdToIndexes[swarmId].remove(entryIdx)
        self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= 1

      # Update the entry for the latest info
      entry['errScore'] = errScore
      entry['completed'] = completed
      entry['matured'] = matured
      entry['numRecords'] = numRecords
      entry['hidden'] = hidden

    # Update the particle best errScore
    particleId = modelParams['particleState']['id']
    genIdx = modelParams['particleState']['genIdx']
    if matured and not hidden:
      (oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None))
      if errScore < oldResult:
        pos = Particle.getPositionFromState(modelParams['particleState'])
        self._particleBest[particleId] = (errScore, pos)

    # Update the particle latest generation index
    prevGenIdx = self._particleLatestGenIdx.get(particleId, -1)
    if not hidden and genIdx > prevGenIdx:
      self._particleLatestGenIdx[particleId] = genIdx
    elif hidden and not wasHidden and genIdx == prevGenIdx:
      # The newest generation of this particle just got hidden, so its
      # latest *visible* generation is the previous one.
      self._particleLatestGenIdx[particleId] = genIdx-1

    # Update the swarm best score
    if not hidden:
      swarmId = modelParams['particleState']['swarmId']
      if not swarmId in self._swarmBestOverall:
        self._swarmBestOverall[swarmId] = []

      bestScores = self._swarmBestOverall[swarmId]
      # Grow the per-generation winners list as needed before comparing.
      while genIdx >= len(bestScores):
        bestScores.append((None, numpy.inf))
      if errScore < bestScores[genIdx][1]:
        bestScores[genIdx] = (modelID, errScore)

    # Update the self._modifiedSwarmGens flags to support the
    # getMaturedSwarmGenerations() call.
    if not hidden:
      key = (swarmId, genIdx)
      if not key in self._maturedSwarmGens:
        self._modifiedSwarmGens.add(key)

    return errScore
def getNumErrModels(self):
"""Return number of models that completed with errors.
Parameters:
---------------------------------------------------------------------
retval: # if models
"""
return self._numErrModels
def getErrModelIds(self):
"""Return list of models IDs that completed with errors.
Parameters:
---------------------------------------------------------------------
retval: # if models
"""
return list(self._errModels)
def getNumCompletedModels(self):
"""Return total number of models that completed.
Parameters:
---------------------------------------------------------------------
retval: # if models that completed
"""
return self._numCompletedModels
def getModelIDFromParamsHash(self, paramsHash):
""" Return the modelID of the model with the given paramsHash, or
None if not found.
Parameters:
---------------------------------------------------------------------
paramsHash: paramsHash to look for
retval: modelId, or None if not found
"""
entryIdx = self. _paramsHashToIndexes.get(paramsHash, None)
if entryIdx is not None:
return self._allResults[entryIdx]['modelID']
else:
return None
def numModels(self, swarmId=None, includeHidden=False):
"""Return the total # of models we have in our database (if swarmId is
None) or in a specific swarm.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders
in this swarm. For example '__address_encoder.__gym_encoder'
includeHidden: If False, this will only return the number of models
that are not hidden (i.e. orphanned, etc.)
retval: numModels
"""
# Count all models
if includeHidden:
if swarmId is None:
return len(self._allResults)
else:
return len(self._swarmIdToIndexes.get(swarmId, []))
# Only count non-hidden models
else:
if swarmId is None:
entries = self._allResults
else:
entries = [self._allResults[entryIdx]
for entryIdx in self._swarmIdToIndexes.get(swarmId,[])]
return len([entry for entry in entries if not entry['hidden']])
def bestModelIdAndErrScore(self, swarmId=None, genIdx=None):
"""Return the model ID of the model with the best result so far and
it's score on the optimize metric. If swarm is None, then it returns
the global best, otherwise it returns the best for the given swarm
for all generatons up to and including genIdx.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: consider the best in all generations up to and including this
generation if not None.
retval: (modelID, result)
"""
if swarmId is None:
return (self._bestModelID, self._bestResult)
else:
if swarmId not in self._swarmBestOverall:
return (None, numpy.inf)
# Get the best score, considering the appropriate generations
genScores = self._swarmBestOverall[swarmId]
bestModelId = None
bestScore = numpy.inf
for (i, (modelId, errScore)) in enumerate(genScores):
if genIdx is not None and i > genIdx:
break
if errScore < bestScore:
bestScore = errScore
bestModelId = modelId
return (bestModelId, bestScore)
def getParticleInfo(self, modelId):
"""Return particle info for a specific modelId.
Parameters:
---------------------------------------------------------------------
modelId: which model Id
retval: (particleState, modelId, errScore, completed, matured)
"""
entry = self._allResults[self._modelIDToIdx[modelId]]
return (entry['modelParams']['particleState'], modelId, entry['errScore'],
entry['completed'], entry['matured'])
  def getParticleInfos(self, swarmId=None, genIdx=None, completed=None,
                       matured=None, lastDescendent=False):
    """Return a list of particleStates for all particles we know about in
    the given swarm, their model Ids, and metric results.

    Parameters:
    ---------------------------------------------------------------------
    swarmId:  A string representation of the sorted list of encoders in this
                 swarm. For example '__address_encoder.__gym_encoder'

    genIdx:  If not None, only return particles at this specific generation
                  index.

    completed:   If not None, only return particles of the given state (either
                completed if 'completed' is True, or running if 'completed'
                is false

    matured:   If not None, only return particles of the given state (either
                matured if 'matured' is True, or not matured if 'matured'
                is false. Note that any model which has completed is also
                considered matured.

    lastDescendent: If True, only return particles that are the last descendent,
                that is, the highest generation index for a given particle Id

    retval:  (particleStates, modelIds, errScores, completed, matured)
              particleStates: list of particleStates
              modelIds: list of modelIds
              errScores: list of errScores, numpy.inf is plugged in
                              if we don't have a result yet
              completed: list of completed booleans
              matured: list of matured booleans
    """
    # The indexes of all the models in this swarm. This list excludes hidden
    # (orphaned) models.
    if swarmId is not None:
      entryIdxs = self._swarmIdToIndexes.get(swarmId, [])
    else:
      # Full scan over every recorded result (hidden ones included here;
      # they are only asserted absent in the per-swarm index path below).
      entryIdxs = range(len(self._allResults))
    if len(entryIdxs) == 0:
      return ([], [], [], [], [])

    # Get the particles of interest
    particleStates = []
    modelIds = []
    errScores = []
    completedFlags = []
    maturedFlags = []
    for idx in entryIdxs:
      entry = self._allResults[idx]

      # If this entry is hidden (i.e. it was an orphaned model), it should
      # not be in this list
      if swarmId is not None:
        assert (not entry['hidden'])

      # Get info on this model
      modelParams = entry['modelParams']
      isCompleted = entry['completed']
      isMatured = entry['matured']
      particleState = modelParams['particleState']
      particleGenIdx = particleState['genIdx']
      particleId = particleState['id']

      # Apply each optional filter; a None filter means "don't care".
      if genIdx is not None and particleGenIdx != genIdx:
        continue

      if completed is not None and (completed != isCompleted):
        continue

      if matured is not None and (matured != isMatured):
        continue

      # lastDescendent keeps only the newest visible generation of each
      # particle, per _particleLatestGenIdx maintained in update().
      if lastDescendent \
              and (self._particleLatestGenIdx[particleId] != particleGenIdx):
        continue

      # Incorporate into return values
      particleStates.append(particleState)
      modelIds.append(entry['modelID'])
      errScores.append(entry['errScore'])
      completedFlags.append(isCompleted)
      maturedFlags.append(isMatured)

    return (particleStates, modelIds, errScores, completedFlags, maturedFlags)
  def getOrphanParticleInfos(self, swarmId, genIdx):
    """Return a list of particleStates for all particles in the given
    swarm generation that have been orphaned.

    Parameters:
    ---------------------------------------------------------------------
    swarmId:  A string representation of the sorted list of encoders in this
                 swarm. For example '__address_encoder.__gym_encoder'

    genIdx:  If not None, only return particles at this specific generation
                  index.

    retval:  (particleStates, modelIds, errScores, completed, matured)
              particleStates: list of particleStates
              modelIds: list of modelIds
              errScores: list of errScores, numpy.inf is plugged in
                              if we don't have a result yet
              completed: list of completed booleans
              matured: list of matured booleans
    """
    # Hidden (orphaned) entries are removed from the per-swarm index by
    # update(), so we must scan the full results list here.
    entryIdxs = range(len(self._allResults))
    if len(entryIdxs) == 0:
      return ([], [], [], [], [])

    # Get the particles of interest
    particleStates = []
    modelIds = []
    errScores = []
    completedFlags = []
    maturedFlags = []
    for idx in entryIdxs:

      # Get info on this model
      entry = self._allResults[idx]
      # Only hidden entries count as orphans here.
      if not entry['hidden']:
        continue

      modelParams = entry['modelParams']
      if modelParams['particleState']['swarmId'] != swarmId:
        continue

      isCompleted = entry['completed']
      isMatured = entry['matured']
      particleState = modelParams['particleState']
      particleGenIdx = particleState['genIdx']
      particleId = particleState['id']

      if genIdx is not None and particleGenIdx != genIdx:
        continue

      # Incorporate into return values
      particleStates.append(particleState)
      modelIds.append(entry['modelID'])
      errScores.append(entry['errScore'])
      completedFlags.append(isCompleted)
      maturedFlags.append(isMatured)

    return (particleStates, modelIds, errScores, completedFlags, maturedFlags)
  def getMaturedSwarmGenerations(self):
    """Return a list of swarm generations that have completed and the
    best (minimal) errScore seen for each of them.

    Parameters:
    ---------------------------------------------------------------------
    retval:  list of tuples. Each tuple is of the form:
              (swarmId, genIdx, bestErrScore)
    """
    # Return results go in this list
    result = []

    # For each of the swarm generations which have had model result updates
    # since the last time we were called, see which have completed.
    # Sorting produces a snapshot copy, so removing keys from
    # _modifiedSwarmGens inside the loop is safe.
    modifiedSwarmGens = sorted(self._modifiedSwarmGens)

    # Walk through them in order from lowest to highest generation index
    for key in modifiedSwarmGens:
      (swarmId, genIdx) = key

      # Skip it if we've already reported on it. This should happen rarely, if
      # ever. It means that some worker has started and completed a model in
      # this generation after we've determined that the generation has ended.
      if key in self._maturedSwarmGens:
        self._modifiedSwarmGens.remove(key)
        continue

      # If the previous generation for this swarm is not complete yet, don't
      # bother evaluating this one.
      if (genIdx >= 1) and not (swarmId, genIdx-1) in self._maturedSwarmGens:
        continue

      # We found a swarm generation that had some results reported since last
      # time, see if it's complete or not
      (_, _, errScores, completedFlags, maturedFlags) = \
                                self.getParticleInfos(swarmId, genIdx)
      maturedFlags = numpy.array(maturedFlags)
      numMatured = maturedFlags.sum()
      # A generation is mature when it has at least the minimum particle
      # count and every one of its particles has matured.
      if numMatured >= self._hsObj._minParticlesPerSwarm \
            and numMatured == len(maturedFlags):
        errScores = numpy.array(errScores)
        bestScore = errScores.min()

        self._maturedSwarmGens.add(key)
        self._modifiedSwarmGens.remove(key)
        result.append((swarmId, genIdx, bestScore))

    # Return results
    return result
def firstNonFullGeneration(self, swarmId, minNumParticles):
  """ Return the generation index of the first generation in the given
  swarm that does not have numParticles particles in it, either still in the
  running state or completed. This does not include orphaned particles.

  Parameters:
  ---------------------------------------------------------------------
  swarmId:  A string representation of the sorted list of encoders in this
             swarm. For example '__address_encoder.__gym_encoder'
  minNumParticles: minimum number of particles required for a full
            generation.
  retval:  generation index, or None if no particles at all.
  """
  # No particles were ever created in this swarm?
  if swarmId not in self._swarmNumParticlesPerGeneration:
    return None

  # One count per generation, in generation order.
  perGenCounts = numpy.array(self._swarmNumParticlesPerGeneration[swarmId])

  # Indices of every generation that is still short of particles.
  shortGens = numpy.where(perGenCounts < minNumParticles)[0]
  if len(shortGens) > 0:
    return shortGens[0]

  # Every existing generation is full; the next (not-yet-created) one
  # is the first non-full generation.
  return len(perGenCounts)
def highestGeneration(self, swarmId):
  """ Return the 0-based index of the highest (most recent) generation in
  the given swarm.

  Parameters:
  ---------------------------------------------------------------------
  swarmId:  A string representation of the sorted list of encoders in this
             swarm. For example '__address_encoder.__gym_encoder'
  retval:  generation index
  """
  # There is one entry per generation, so the last valid index is len - 1.
  perGenCounts = self._swarmNumParticlesPerGeneration[swarmId]
  return len(perGenCounts) - 1
def getParticleBest(self, particleId):
  """ Return the best score and position seen so far for a given particle.
  The position is given as a dict, with varName:varPosition items in it.

  Parameters:
  ---------------------------------------------------------------------
  particleId:  which particle
  retval:      (bestResult, bestPosition); (None, None) when nothing has
               been recorded for this particle yet
  """
  try:
    return self._particleBest[particleId]
  except KeyError:
    # No best recorded yet for this particle.
    return (None, None)
def getResultsPerChoice(self, swarmId, maxGenIdx, varName):
  """ Return a dict of the errors obtained on models that were run with
  each value from a PermuteChoice variable.

  For example, if a PermuteChoice variable has the following choices:
    ['a', 'b', 'c']

  The dict will have 3 elements. The keys are the stringified choiceVars,
  and each value is tuple containing (choiceVar, errors) where choiceVar is
  the original form of the choiceVar (before stringification) and errors is
  the list of errors received from models that used the specific choice:
  retval:
    ['a':('a', [0.1, 0.2, 0.3]), 'b':('b', [0.5, 0.1, 0.6]), 'c':('c', [])]

  Parameters:
  ---------------------------------------------------------------------
  swarmId:  swarm Id of the swarm to retrieve info from
  maxGenIdx: max generation index to consider from other models, ignored
              if None
  varName:  which variable to retrieve
  retval:  dict mapping str(choice) -> (choice, list of errScores)
  """
  results = dict()
  # Get all the completed particles in this swarm
  (allParticles, _, resultErrs, _, _) = self.getParticleInfos(swarmId,
                                              genIdx=None, matured=True)

  for particleState, resultErr in itertools.izip(allParticles, resultErrs):
    # Consider this generation?
    if maxGenIdx is not None:
      if particleState['genIdx'] > maxGenIdx:
        continue

    # Ignore unless this model completed successfully (inf marks a
    # failed/orphaned model)
    if resultErr == numpy.inf:
      continue

    position = Particle.getPositionFromState(particleState)
    varPosition = position[varName]
    # Keys are stringified because choice values may be unhashable.
    varPositionStr = str(varPosition)
    if varPositionStr in results:
      results[varPositionStr][1].append(resultErr)
    else:
      results[varPositionStr] = (varPosition, [resultErr])

  return results
class Particle(object):
  """Construct a particle. Each particle evaluates one or more models
  serially. Each model represents a position that the particle is evaluated
  at.

  Each position is a set of values chosen for each of the permutation variables.
  The particle's best position is the value of the permutation variables when it
  did best on the optimization metric.

  Some permutation variables are treated like traditional particle swarm
  variables - that is they have a position and velocity. Others are simply
  choice variables, for example a list of strings. We follow a different
  methodology for choosing each permutation variable value depending on its
  type.

  A particle belongs to 1 and only 1 swarm. A swarm is a collection of particles
  that all share the same global best position. A swarm is identified by its
  specific combination of fields. If we are evaluating multiple different field
  combinations, then there will be multiple swarms. A Hypersearch Worker (HSW)
  will only instantiate and run one particle at a time. When done running a
  particle, another worker can pick it up, pick a new position for it, and run
  it based on the particle state information which is stored in each model table
  entry.

  Each particle has a generationIdx. It starts out at generation #0. Every time
  a model evaluation completes and the particle is moved to a different position
  (to evaluate a different model), the generation index is incremented.

  Every particle that is created has a unique particleId. The particleId
  is a string formed as '<workerConnectionId>.<particleIdx>', where particleIdx
  starts at 0 for each worker and increments by 1 every time a new particle
  is created by that worker.
  """

  # Class variable: index of the next particle created by this worker.
  # Combined with the worker ID it yields a globally unique particleId.
  _nextParticleID = 0

  def __init__(self, hsObj, resultsDB, flattenedPermuteVars,
               swarmId=None, newFarFrom=None, evolveFromState=None,
               newFromClone=None, newParticleId=False):
    """ Create a particle.

    There are 3 fundamentally different methods of instantiating a particle:
    1.) You can instantiate a new one from scratch, at generation index #0. This
          particle gets a new particleId.
            required: swarmId
            optional: newFarFrom
            must be None: evolveFromState, newFromClone

    2.) You can instantiate one from savedState, in which case its generation
          index is incremented (from the value stored in the saved state) and
          its particleId remains the same.
            required: evolveFromState
            optional:
            must be None: flattenedPermuteVars, swarmId, newFromClone

    3.) You can clone another particle, creating a new particle at the same
          generationIdx but a different particleId. This new particle will end
          up at exactly the same position as the one it was cloned from. If
          you want to move it to the next position, or just jiggle it a bit, call
          newPosition() or agitate() after instantiation.
            required: newFromClone
            optional:
            must be None: flattenedPermuteVars, swarmId, evolveFromState

    Parameters:
    --------------------------------------------------------------------
    hsObj:    The HypersearchV2 instance
    resultsDB: the ResultsDB instance that holds all the model results
    flattenedPermuteVars: dict() containing the (key, PermuteVariable) pairs
                          of the flattened permutation variables as read from
                          the permutations file.
    swarmId:  String that represents the encoder names of the encoders that are
              to be included in this particle's model. Of the form
              'encoder1.encoder2'.
              Required for creation method #1.
    newFarFrom: If not None, this is a list of other particleState dicts in the
              swarm that we want to be as far away from as possible. Optional
              argument for creation method #1.
    evolveFromState: If not None, evolve an existing particle. This is a
              dict containing the particle's state. Preserve the particleId, but
              increment the generation index. Required for creation method #2.
    newFromClone: If not None, clone this other particle's position and generation
              index, with small random perturbations. This is a dict containing the
              particle's state. Required for creation method #3.
    newParticleId: Only applicable when newFromClone is True. Give the clone
              a new particle ID.
    """
    # Save constructor arguments
    self._hsObj = hsObj
    self.logger = hsObj.logger
    self._resultsDB = resultsDB

    # Seed the random number generator used for all the variables in this
    # particle. We will re-seed it differently below based on the
    # construction method (see the newFarFrom handling in method #1).
    self._rng = random.Random()
    self._rng.seed(42)

    # Setup our variable set by taking what's in flattenedPermuteVars and
    # stripping out vars that belong to encoders we are not using.
    def _setupVars(flattenedPermuteVars):
      allowedEncoderNames = self.swarmId.split('.')
      self.permuteVars = copy.deepcopy(flattenedPermuteVars)

      # Remove fields we don't want.
      # NOTE: keys() returns a list copy in Python 2, so popping entries
      # inside the loop is safe.
      varNames = self.permuteVars.keys()
      for varName in varNames:
        # Remove encoders we're not using
        if ':' in varName:    # if an encoder
          if varName.split(':')[0] not in allowedEncoderNames:
            self.permuteVars.pop(varName)
            continue

        # All PermuteChoice variables need to know all prior results obtained
        # with each choice.
        if isinstance(self.permuteVars[varName], PermuteChoices):
          if self._hsObj._speculativeParticles:
            maxGenIdx = None
          else:
            maxGenIdx = self.genIdx - 1

          resultsPerChoice = self._resultsDB.getResultsPerChoice(
              swarmId=self.swarmId, maxGenIdx=maxGenIdx, varName=varName)
          self.permuteVars[varName].setResultsPerChoice(
              resultsPerChoice.values())

    # Method #1
    # Create from scratch, optionally pushing away from others that already
    # exist.
    if swarmId is not None:
      assert (evolveFromState is None)
      assert (newFromClone is None)

      # Save construction param
      self.swarmId = swarmId

      # Assign a new unique ID to this particle
      self.particleId = "%s.%s" % (str(self._hsObj._workerID),
                                   str(Particle._nextParticleID))
      Particle._nextParticleID += 1

      # Init the generation index
      self.genIdx = 0

      # Setup the variables to initial locations.
      _setupVars(flattenedPermuteVars)

      # Push away from other particles?
      if newFarFrom is not None:
        for varName in self.permuteVars.iterkeys():
          otherPositions = []
          for particleState in newFarFrom:
            otherPositions.append(particleState['varStates'][varName]['position'])
          self.permuteVars[varName].pushAwayFrom(otherPositions, self._rng)

        # Give this particle a unique seed (derived from the positions of the
        # other particles so different particles diverge).
        self._rng.seed(str(otherPositions))

    # Method #2
    # Instantiate from saved state, preserving particleId but incrementing
    # generation index.
    elif evolveFromState is not None:
      assert (swarmId is None)
      assert (newFarFrom is None)
      assert (newFromClone is None)

      # Setup other variables from saved state
      self.particleId = evolveFromState['id']
      self.genIdx = evolveFromState['genIdx'] + 1
      self.swarmId = evolveFromState['swarmId']

      # Setup the variables to initial locations.
      _setupVars(flattenedPermuteVars)

      # Override the position and velocity of each variable from
      # saved state
      self.initStateFrom(self.particleId, evolveFromState, newBest=True)

      # Move it to the next position. We need the swarm best for this.
      self.newPosition()

    # Method #3
    # Clone another particle, producing a new particle at the same genIdx.
    # The particleId is kept unless newParticleId is True. This is used to
    # re-run an orphaned model.
    elif newFromClone is not None:
      assert (swarmId is None)
      assert (newFarFrom is None)
      assert (evolveFromState is None)

      # Setup other variables from clone particle
      self.particleId = newFromClone['id']
      if newParticleId:
        self.particleId = "%s.%s" % (str(self._hsObj._workerID),
                                     str(Particle._nextParticleID))
        Particle._nextParticleID += 1

      self.genIdx = newFromClone['genIdx']
      self.swarmId = newFromClone['swarmId']

      # Setup the variables to initial locations.
      _setupVars(flattenedPermuteVars)

      # Override the position and velocity of each variable from
      # the clone
      self.initStateFrom(self.particleId, newFromClone, newBest=False)

    else:
      assert False, "invalid creation parameters"

    # Log it
    self.logger.debug("Created particle: %s" % (str(self)))

  def __repr__(self):
    return "Particle(swarmId=%s) [particleId=%s, genIdx=%d, " \
           "permuteVars=\n%s]" % (self.swarmId, self.particleId,
                                  self.genIdx,
                                  pprint.pformat(self.permuteVars, indent=4))

  def getState(self):
    """Get the particle state as a dict. This is enough information to
    instantiate this particle on another worker (via creation methods #2
    or #3 of __init__)."""
    varStates = dict()
    for varName, var in self.permuteVars.iteritems():
      varStates[varName] = var.getState()

    return dict(id = self.particleId,
                genIdx = self.genIdx,
                swarmId = self.swarmId,
                varStates = varStates)

  def initStateFrom(self, particleId, particleState, newBest):
    """Init all of our variable positions, velocities, and optionally the best
    result and best position from the given particle.

    If newBest is true, we get the best result and position for this new
    generation from the resultsDB. This is used when evolving a particle
    because the bestResult and position as stored was the best AT THE TIME
    THAT PARTICLE STARTED TO RUN and does not include the best since that
    particle completed.
    """
    # Get the updated best position and result?
    if newBest:
      (bestResult, bestPosition) = self._resultsDB.getParticleBest(particleId)
    else:
      bestResult = bestPosition = None

    # Replace with the position and velocity of each variable from
    # saved state
    varStates = particleState['varStates']
    for varName in varStates.keys():
      varState = copy.deepcopy(varStates[varName])
      if newBest:
        varState['bestResult'] = bestResult
      if bestPosition is not None:
        varState['bestPosition'] = bestPosition[varName]
      self.permuteVars[varName].setState(varState)

  def copyEncoderStatesFrom(self, particleState):
    """Copy all encoder variables from particleState into this particle.

    Parameters:
    --------------------------------------------------------------
    particleState:        dict produced by a particle's getState() method
    """
    # Set this to false if you don't want the variable to move anymore
    # after we set the state
    allowedToMove = True

    for varName in particleState['varStates']:
      if ':' in varName:    # if an encoder

        # If this particle doesn't include this field, don't copy it
        if varName not in self.permuteVars:
          continue

        # Set the best position to the copied position
        state = copy.deepcopy(particleState['varStates'][varName])
        state['_position'] = state['position']
        state['bestPosition'] = state['position']

        if not allowedToMove:
          state['velocity'] = 0

        # Set the state now
        self.permuteVars[varName].setState(state)

        if allowedToMove:
          # Let the particle move in both directions from the best position
          # it found previously and set its initial velocity to a known
          # fraction of the total distance.
          self.permuteVars[varName].resetVelocity(self._rng)

  def copyVarStatesFrom(self, particleState, varNames):
    """Copy specific variables from particleState into this particle.

    Parameters:
    --------------------------------------------------------------
    particleState:        dict produced by a particle's getState() method
    varNames:             which variables to copy
    """
    # Set this to false if you don't want the variable to move anymore
    # after we set the state
    allowedToMove = True

    for varName in particleState['varStates']:
      if varName in varNames:

        # If this particle doesn't include this field, don't copy it
        if varName not in self.permuteVars:
          continue

        # Set the best position to the copied position
        state = copy.deepcopy(particleState['varStates'][varName])
        state['_position'] = state['position']
        state['bestPosition'] = state['position']

        if not allowedToMove:
          state['velocity'] = 0

        # Set the state now
        self.permuteVars[varName].setState(state)

        if allowedToMove:
          # Let the particle move in both directions from the best position
          # it found previously and set its initial velocity to a known
          # fraction of the total distance.
          self.permuteVars[varName].resetVelocity(self._rng)

  def getPosition(self):
    """Return the position of this particle. This returns a dict() of key
    value pairs where each key is the name of the flattened permutation
    variable and the value is its chosen value.

    Parameters:
    --------------------------------------------------------------
    retval:     dict() of flattened permutation choices
    """
    result = dict()
    for (varName, value) in self.permuteVars.iteritems():
      result[varName] = value.getPosition()

    return result

  @staticmethod
  def getPositionFromState(pState):
    """Return the position of a particle given its state dict.

    Parameters:
    --------------------------------------------------------------
    retval:     dict() of particle position, keys are the variable names,
                  values are their positions
    """
    result = dict()
    for (varName, value) in pState['varStates'].iteritems():
      result[varName] = value['position']

    return result

  def agitate(self):
    """Agitate this particle so that it is likely to go to a new position.
    Every time agitate is called, the particle is jiggled an even greater
    amount.

    Parameters:
    --------------------------------------------------------------
    retval:               None
    """
    for (varName, var) in self.permuteVars.iteritems():
      var.agitate()

    self.newPosition()

  def newPosition(self, whichVars=None):
    # TODO: incorporate data from choice variables....
    # TODO: make sure we're calling this when appropriate.
    """Choose a new position based on results obtained so far from all other
    particles.

    Parameters:
    --------------------------------------------------------------
    whichVars:       If not None, only move these variables
    retval:               new position
    """
    # Get the global best position for this swarm generation
    globalBestPosition = None
    # If speculative particles are enabled, use the global best considering
    # even particles in the current generation. This gives better results
    # but does not provide repeatable results because it depends on
    # worker timing
    if self._hsObj._speculativeParticles:
      genIdx = self.genIdx
    else:
      genIdx = self.genIdx - 1

    if genIdx >= 0:
      (bestModelId, _) = self._resultsDB.bestModelIdAndErrScore(self.swarmId, genIdx)
      if bestModelId is not None:
        (particleState, _, _, _, _) = self._resultsDB.getParticleInfo(bestModelId)
        globalBestPosition = Particle.getPositionFromState(particleState)

    # Update each variable
    for (varName, var) in self.permuteVars.iteritems():
      if whichVars is not None and varName not in whichVars:
        continue
      if globalBestPosition is None:
        var.newPosition(None, self._rng)
      else:
        var.newPosition(globalBestPosition[varName], self._rng)

    # get the new position
    position = self.getPosition()

    # Log the new position
    if self.logger.getEffectiveLevel() <= logging.DEBUG:
      msg = StringIO.StringIO()
      print >> msg, "New particle position: \n%s" % (pprint.pformat(position,
                                                      indent=4))
      print >> msg, "Particle variables:"
      for (varName, var) in self.permuteVars.iteritems():
        print >> msg, "  %s: %s" % (varName, str(var))
      self.logger.debug(msg.getvalue())
      msg.close()

    return position
class HsState(object):
"""This class encapsulates the Hypersearch state which we share with all
other workers. This state gets serialized into a JSON dict and written to
the engWorkerState field of the job record.
Whenever a worker changes this state, it does an atomic setFieldIfEqual to
insure it has the latest state as updated by any other worker as a base.
Here is an example snapshot of this state information:
swarms = {'a': {'status': 'completed', # 'active','completing','completed',
# or 'killed'
'bestModelId': <modelID>, # Only set for 'completed' swarms
'bestErrScore': <errScore>, # Only set for 'completed' swarms
'sprintIdx': 0,
},
'a.b': {'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 1,
}
}
sprints = [{'status': 'completed', # 'active','completing','completed'
'bestModelId': <modelID>, # Only set for 'completed' sprints
'bestErrScore': <errScore>, # Only set for 'completed' sprints
},
{'status': 'completing',
'bestModelId': <None>,
'bestErrScore': <None>
}
{'status': 'active',
'bestModelId': None
'bestErrScore': None
}
]
"""
def __init__(self, hsObj):
  """ Create our state object.

  Parameters:
  ---------------------------------------------------------------------
  hsObj:     Reference to the HypersearchV2 instance. The DAO, job ID and
             logger are all obtained through it.
  """
  # Save constructor parameters
  self._hsObj = hsObj

  # Convenient access to the logger
  self.logger = self._hsObj.logger

  # This contains our current state, and local working changes
  self._state = None

  # This contains the state we last read from the database
  self._priorStateJSON = None

  # Set when we make a change to our state locally
  self._dirty = False

  # Read in the initial state (creates and writes the initial state if the
  # job record doesn't have one yet)
  self.readStateFromDB()
def isDirty(self):
"""Return true if our local copy of the state has changed since the
last time we read from the DB.
"""
return self._dirty
def isSearchOver(self):
"""Return true if the search should be considered over."""
return self._state['searchOver']
def readStateFromDB(self):
  """Set our state to that obtained from the engWorkerState field of the
  job record. If no state has been written yet, build the initial state
  (the sprint-0 swarms appropriate to the search type), write it atomically
  (first-writer-wins), then re-read whatever actually ended up in the DB —
  which may have been written by another worker.
  """
  self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
                                                  ['engWorkerState'])[0]

  # Init if no prior state yet
  if self._priorStateJSON is None:
    swarms = dict()

    # Fast Swarm, first and only sprint has one swarm for each field
    # in fixedFields
    if self._hsObj._fixedFields is not None:
      # NOTE(review): leftover debug print (Python 2 statement); presumably
      # should go through self.logger instead — confirm before removing.
      print self._hsObj._fixedFields
      encoderSet = []
      for field in self._hsObj._fixedFields:
        if field == '_classifierInput':
          continue
        encoderName = self.getEncoderKeyFromName(field)
        assert encoderName in self._hsObj._encoderNames, "The field '%s' " \
              " specified in the fixedFields list is not present in this " \
              " model." % (field)
        encoderSet.append(encoderName)
      encoderSet.sort()
      swarms['.'.join(encoderSet)] = {
                              'status': 'active',
                              'bestModelId': None,
                              'bestErrScore': None,
                              'sprintIdx': 0,
                              }
    # Temporal prediction search, first sprint has N swarms of 1 field each,
    # the predicted field may or may not be that one field.
    elif self._hsObj._searchType == HsSearchType.temporal:
      for encoderName in self._hsObj._encoderNames:
        swarms[encoderName] = {
                                'status': 'active',
                                'bestModelId': None,
                                'bestErrScore': None,
                                'sprintIdx': 0,
                                }

    # Classification prediction search, first sprint has N swarms of 1 field
    # each where this field can NOT be the predicted field.
    elif self._hsObj._searchType == HsSearchType.classification:
      for encoderName in self._hsObj._encoderNames:
        if encoderName == self._hsObj._predictedFieldEncoder:
          continue
        swarms[encoderName] = {
                                'status': 'active',
                                'bestModelId': None,
                                'bestErrScore': None,
                                'sprintIdx': 0,
                                }

    # Legacy temporal. This is either a model that uses reconstruction or
    # an older multi-step model that doesn't have a separate
    # 'classifierOnly' encoder for the predicted field. Here, the predicted
    # field must ALWAYS be present and the first sprint tries the predicted
    # field only
    elif self._hsObj._searchType == HsSearchType.legacyTemporal:
      swarms[self._hsObj._predictedFieldEncoder] = {
                              'status': 'active',
                              'bestModelId': None,
                              'bestErrScore': None,
                              'sprintIdx': 0,
                              }

    else:
      raise RuntimeError("Unsupported search type: %s" % \
                          (self._hsObj._searchType))

    # Initialize the state.
    self._state = dict(
      # The last time the state was updated by a worker.
      lastUpdateTime = time.time(),

      # Set from within setSwarmState() if we detect that the sprint we just
      # completed did worse than a prior sprint. This stores the index of
      # the last good sprint.
      lastGoodSprint = None,

      # Set from within setSwarmState() if lastGoodSprint is True and all
      # sprints have completed.
      searchOver = False,

      # This is a summary of the active swarms - this information can also
      # be obtained from the swarms entry that follows, but is summarized here
      # for easier reference when viewing the state as presented by
      # log messages and prints of the hsState data structure (by
      # GrokRunPermutations).
      activeSwarms = swarms.keys(),

      # All the swarms that have been created so far.
      swarms = swarms,

      # All the sprints that have completed or are in progress.
      sprints = [{'status': 'active',
                  'bestModelId': None,
                  'bestErrScore': None}],

      # The list of encoders we have "blacklisted" because they
      # performed so poorly.
      blackListedEncoders = [],
      )

    # This will do nothing if the value of engWorkerState is not still None
    # (i.e. another worker initialized the state first).
    self._hsObj._cjDAO.jobSetFieldIfEqual(
        self._hsObj._jobID, 'engWorkerState', json.dumps(self._state), None)

    self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(
        self._hsObj._jobID, ['engWorkerState'])[0]
    assert (self._priorStateJSON is not None)

  # Read state from the database
  self._state = json.loads(self._priorStateJSON)
  self._dirty = False
def writeStateToDB(self):
  """Update the state in the job record with our local changes (if any).
  If we don't have the latest state in our priorStateJSON, then re-load
  in the latest state and return False. If we were successful writing out
  our changes, return True

  Uses optimistic concurrency: the compare-and-set only succeeds when the
  DB still holds the exact JSON we last read (self._priorStateJSON).

  Parameters:
  ---------------------------------------------------------------------
  retval:    True if we were successful writing out our changes
             False if our priorState is not the latest that was in the DB.
             In this case, we will re-load our state from the DB
  """
  # If no changes, do nothing
  if not self._dirty:
    return True

  # Set the update time
  self._state['lastUpdateTime'] = time.time()
  newStateJSON = json.dumps(self._state)
  success = self._hsObj._cjDAO.jobSetFieldIfEqual(self._hsObj._jobID,
      'engWorkerState', str(newStateJSON), str(self._priorStateJSON))

  if success:
    self.logger.debug("Success changing hsState to: \n%s " % \
                      (pprint.pformat(self._state, indent=4)))
    self._priorStateJSON = newStateJSON

  # If no success, read in the current state from the DB
  else:
    self.logger.debug("Failed to change hsState to: \n%s " % \
                      (pprint.pformat(self._state, indent=4)))

    self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
                                                    ['engWorkerState'])[0]
    self._state = json.loads(self._priorStateJSON)

    self.logger.info("New hsState has been set by some other worker to: "
                     " \n%s" % (pprint.pformat(self._state, indent=4)))

  return success
##########################################################################
def getEncoderNameFromKey(self, key):
  """ Given an encoder dictionary key, get the encoder name.

  Encoders are a sub-dict within model params, and in HSv2, their key
  is structured like this for example:
     'modelParams|sensorParams|encoders|home_winloss'

  The encoderName is the last word in the | separated key name
  """
  # Only the final path component is needed, so split once from the right.
  return key.rsplit('|', 1)[-1]
##########################################################################
def getEncoderKeyFromName(self, name):
  """ Given an encoder name, get the key.

  Encoders are a sub-dict within model params, and in HSv2, their key
  is structured like this for example:
     'modelParams|sensorParams|encoders|home_winloss'

  The encoderName is the last word in the | separated key name
  """
  # Build the fixed prefix path plus the encoder name.
  return '|'.join(('modelParams', 'sensorParams', 'encoders', name))
##########################################################################
def getFieldContributions(self):
  """Return the field contributions statistics.

  Parameters:
  ---------------------------------------------------------------------
  retval:   Tuple (pctFieldContributionsDict, absFieldContributionsDict).
            In each dict, the keys are the field names and the values
            are how much each field contributed to the best score
            (percent improvement over the base score, and absolute
            errScore difference, respectively).
  """

  # In the fast swarm, there is only 1 sprint and field contributions are
  # not defined
  if self._hsObj._fixedFields is not None:
    return dict(), dict()

  # Get the predicted field encoder name
  predictedEncoderName = self._hsObj._predictedFieldEncoder

  # -----------------------------------------------------------------------
  # Collect all the single field scores (swarms whose id names exactly one
  # encoder)
  fieldScores = []
  for swarmId, info in self._state['swarms'].iteritems():
    encodersUsed = swarmId.split('.')
    if len(encodersUsed) != 1:
      continue
    field = self.getEncoderNameFromKey(encodersUsed[0])
    bestScore = info['bestErrScore']

    # If the bestScore is None, this swarm hasn't completed yet (this could
    # happen if we're exiting because of maxModels), so look up the best
    # score so far
    if bestScore is None:
      (_modelId, bestScore) = \
          self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)

    fieldScores.append((bestScore, field))

  # -----------------------------------------------------------------------
  # If we only have 1 field that was tried in the first sprint, then use that
  # as the base and get the contributions from the fields in the next sprint.
  if self._hsObj._searchType == HsSearchType.legacyTemporal:
    assert(len(fieldScores) == 1)
    (baseErrScore, baseField) = fieldScores[0]

    # Two-encoder swarms pair the base (predicted) field with one other
    # field; that other field's contribution is measured here.
    for swarmId, info in self._state['swarms'].iteritems():
      encodersUsed = swarmId.split('.')
      if len(encodersUsed) != 2:
        continue

      fields = [self.getEncoderNameFromKey(name) for name in encodersUsed]
      fields.remove(baseField)

      fieldScores.append((info['bestErrScore'], fields[0]))

  # The first sprint tried a bunch of fields, pick the worst performing one
  # (within the top self._hsObj._maxBranching ones) as the base
  else:
    fieldScores.sort(reverse=True)

    # If maxBranching was specified, pick the worst performing field within
    # the top maxBranching+1 fields as our base, which will give that field
    # a contribution of 0.
    if self._hsObj._maxBranching > 0 \
            and len(fieldScores) > self._hsObj._maxBranching:
      baseErrScore = fieldScores[-self._hsObj._maxBranching-1][0]
    else:
      baseErrScore = fieldScores[0][0]

  # -----------------------------------------------------------------------
  # Prepare and return the fieldContributions dict
  pctFieldContributionsDict = dict()
  absFieldContributionsDict = dict()

  # If we have no base score, can't compute field contributions. This can
  # happen when we exit early due to maxModels or being cancelled
  if baseErrScore is not None:

    # If the base error score is 0, we can't compute a percent difference
    # off of it, so move it to a very small float
    if abs(baseErrScore) < 0.00001:
      baseErrScore = 0.00001
    for (errScore, field) in fieldScores:
      if errScore is not None:
        pctBetter = (baseErrScore - errScore) * 100.0 / baseErrScore
      else:
        # No score for this field: report zero contribution.
        pctBetter = 0.0
        errScore = baseErrScore   # for absFieldContribution

      pctFieldContributionsDict[field] = pctBetter
      absFieldContributionsDict[field] = baseErrScore - errScore

  self.logger.debug("FieldContributions: %s" % (pctFieldContributionsDict))
  return pctFieldContributionsDict, absFieldContributionsDict
def getAllSwarms(self, sprintIdx):
"""Return the list of all swarms in the given sprint.
Parameters:
---------------------------------------------------------------------
retval: list of active swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['sprintIdx'] == sprintIdx:
swarmIds.append(swarmId)
return swarmIds
def getActiveSwarms(self, sprintIdx=None):
"""Return the list of active swarms in the given sprint. These are swarms
which still need new particles created in them.
Parameters:
---------------------------------------------------------------------
sprintIdx: which sprint to query. If None, get active swarms from all
sprints
retval: list of active swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if sprintIdx is not None and info['sprintIdx'] != sprintIdx:
continue
if info['status'] == 'active':
swarmIds.append(swarmId)
return swarmIds
def getNonKilledSwarms(self, sprintIdx):
"""Return the list of swarms in the given sprint that were not killed.
This is called when we are trying to figure out which encoders to carry
forward to the next sprint. We don't want to carry forward encoder
combintations which were obviously bad (in killed swarms).
Parameters:
---------------------------------------------------------------------
retval: list of active swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['sprintIdx'] == sprintIdx and info['status'] != 'killed':
swarmIds.append(swarmId)
return swarmIds
def getCompletedSwarms(self):
"""Return the list of all completed swarms.
Parameters:
---------------------------------------------------------------------
retval: list of active swarm Ids
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['status'] == 'completed':
swarmIds.append(swarmId)
return swarmIds
def getCompletingSwarms(self):
"""Return the list of all completing swarms.
Parameters:
---------------------------------------------------------------------
retval: list of active swarm Ids
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['status'] == 'completing':
swarmIds.append(swarmId)
return swarmIds
def bestModelInCompletedSwarm(self, swarmId):
"""Return the best model ID and it's errScore from the given swarm.
If the swarm has not completed yet, the bestModelID will be None.
Parameters:
---------------------------------------------------------------------
retval: (modelId, errScore)
"""
swarmInfo = self._state['swarms'][swarmId]
return (swarmInfo['bestModelId'],
swarmInfo['bestErrScore'])
def bestModelInCompletedSprint(self, sprintIdx):
"""Return the best model ID and it's errScore from the given sprint.
If the sprint has not completed yet, the bestModelID will be None.
Parameters:
---------------------------------------------------------------------
retval: (modelId, errScore)
"""
sprintInfo = self._state['sprints'][sprintIdx]
return (sprintInfo['bestModelId'],
sprintInfo['bestErrScore'])
def bestModelInSprint(self, sprintIdx):
"""Return the best model ID and it's errScore from the given sprint,
which may still be in progress. This returns the best score from all models
in the sprint which have matured so far.
Parameters:
---------------------------------------------------------------------
retval: (modelId, errScore)
"""
# Get all the swarms in this sprint
swarms = self.getAllSwarms(sprintIdx)
# Get the best model and score from each swarm
bestModelId = None
bestErrScore = numpy.inf
for swarmId in swarms:
(modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
if errScore < bestErrScore:
bestModelId = modelId
bestErrScore = errScore
return (bestModelId, bestErrScore)
  def setSwarmState(self, swarmId, newStatus):
    """Change the given swarm's status to 'newStatus'.

    When the new status is 'completed', the swarm's best model and errScore
    are looked up from the results DB and recorded into the swarm state.
    This method also rolls the change up to the swarm's sprint: it recomputes
    the sprint status, records the sprint's best model when the sprint
    completes, and detects when the overall search is over (the latest
    completed sprint did no better than an earlier one).

    Parameters:
    ---------------------------------------------------------------------
    swarmId:     swarm Id
    newStatus:   new status, either 'active', 'completing', 'completed', or
                   'killed'
    """
    assert (newStatus in ['active', 'completing', 'completed', 'killed'])

    # Set the swarm status
    swarmInfo = self._state['swarms'][swarmId]
    # No-op if nothing changes
    if swarmInfo['status'] == newStatus:
      return

    # If some other worker noticed it as completed, setting it to completing
    # is obviously old information....
    if swarmInfo['status'] == 'completed' and newStatus == 'completing':
      return

    self._dirty = True
    swarmInfo['status'] = newStatus
    if newStatus == 'completed':
      # Record the swarm's winner from the results DB
      (modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
      swarmInfo['bestModelId'] = modelId
      swarmInfo['bestErrScore'] = errScore

    # If no longer active, remove it from the activeSwarms entry
    if newStatus != 'active' and swarmId in self._state['activeSwarms']:
      self._state['activeSwarms'].remove(swarmId)

    # If new status is 'killed', kill off any running particles in that swarm
    if newStatus=='killed':
      self._hsObj.killSwarmParticles(swarmId)

    # In case speculative particles are enabled, make sure we generate a new
    # swarm at this time if all of the swarms in the current sprint have
    # completed. This will insure that we don't mark the sprint as completed
    # before we've created all the possible swarms.
    sprintIdx = swarmInfo['sprintIdx']
    self.isSprintActive(sprintIdx)

    # Update the sprint status. Check all the swarms that belong to this sprint.
    # If they are all completed, the sprint is completed.
    sprintInfo = self._state['sprints'][sprintIdx]

    statusCounts = dict(active=0, completing=0, completed=0, killed=0)
    bestModelIds = []
    bestErrScores = []
    for info in self._state['swarms'].itervalues():
      if info['sprintIdx'] != sprintIdx:
        continue
      statusCounts[info['status']] += 1
      if info['status'] == 'completed':
        bestModelIds.append(info['bestModelId'])
        bestErrScores.append(info['bestErrScore'])

    # Any active swarm keeps the sprint 'active'; otherwise any completing
    # swarm makes it 'completing'; only when neither remains (all completed
    # or killed) is the sprint 'completed'.
    if statusCounts['active'] > 0:
      sprintStatus = 'active'
    elif statusCounts['completing'] > 0:
      sprintStatus = 'completing'
    else:
      sprintStatus = 'completed'
    sprintInfo['status'] = sprintStatus

    # If the sprint is complete, get the best model from all of its swarms and
    # store that as the sprint best
    if sprintStatus == 'completed':
      if len(bestErrScores) > 0:
        whichIdx = numpy.array(bestErrScores).argmin()
        sprintInfo['bestModelId'] = bestModelIds[whichIdx]
        sprintInfo['bestErrScore'] = bestErrScores[whichIdx]
      else:
        # This sprint was empty, most likely because all particles were
        # killed. Give it a huge error score
        sprintInfo['bestModelId'] = 0
        sprintInfo['bestErrScore'] = numpy.inf

      # See if our best err score got NO BETTER as compared to a previous
      # sprint. If so, stop exploring subsequent sprints (lastGoodSprint
      # is no longer None).
      bestPrior = numpy.inf
      for idx in range(sprintIdx):
        if self._state['sprints'][idx]['status'] == 'completed':
          (_, errScore) = self.bestModelInCompletedSprint(idx)
          if errScore is None:
            errScore = numpy.inf
        else:
          # Incomplete prior sprints don't count toward the prior best
          errScore = numpy.inf
        if errScore < bestPrior:
          bestPrior = errScore

      if sprintInfo['bestErrScore'] >= bestPrior:
        self._state['lastGoodSprint'] = sprintIdx-1

      # If ALL sprints up to the last good one are done, the search is now over
      if self._state['lastGoodSprint'] is not None \
            and not self.anyGoodSprintsActive():
        self._state['searchOver'] = True
def anyGoodSprintsActive(self):
"""Return True if there are any more good sprints still being explored.
A 'good' sprint is one that is earlier than where we detected an increase
in error from sprint to subsequent sprint.
"""
if self._state['lastGoodSprint'] is not None:
goodSprints = self._state['sprints'][0:self._state['lastGoodSprint']+1]
else:
goodSprints = self._state['sprints']
for sprint in goodSprints:
if sprint['status'] == 'active':
anyActiveSprints = True
break
else:
anyActiveSprints = False
return anyActiveSprints
def isSprintCompleted(self, sprintIdx):
"""Return True if the given sprint has completed."""
numExistingSprints = len(self._state['sprints'])
if sprintIdx >= numExistingSprints:
return False
return (self._state['sprints'][sprintIdx]['status'] == 'completed')
  def killUselessSwarms(self):
    """See if we can kill off some speculative swarms. If an earlier sprint
    has finally completed, we can now tell which fields should *really* be present
    in the sprints we've already started due to speculation, and kill off the
    swarms that should not have been included.

    Side effect: each pruned swarm is moved to 'killed' via setSwarmState().
    """
    # Get number of existing sprints
    numExistingSprints = len(self._state['sprints'])

    # Should we bother killing useless swarms? There must be at least one
    # sprint built on top of a base sprint; the legacyTemporal search type
    # uses a threshold one higher than the other types.
    if self._hsObj._searchType == HsSearchType.legacyTemporal:
      if numExistingSprints <= 2:
        return
    else:
      if numExistingSprints <= 1:
        return

    # Form completedSwarms as a list of tuples, each tuple contains:
    #  (swarmName, swarmState, swarmBestErrScore)
    # ex. completedSwarms:
    #    [('a', {...}, 1.4),
    #     ('b', {...}, 2.0),
    #     ('c', {...}, 3.0)]
    completedSwarms = self.getCompletedSwarms()
    completedSwarms = [(swarm, self._state["swarms"][swarm],
                        self._state["swarms"][swarm]["bestErrScore"]) \
                                                for swarm in completedSwarms]

    # Form the completedMatrix. Each row corresponds to a sprint. Each row
    #  contains the list of swarm tuples that belong to that sprint, sorted
    #  by best score. Each swarm tuple contains (swarmName, swarmState,
    #  swarmBestErrScore).
    # ex. completedMatrix:
    #    [(('a', {...}, 1.4), ('b', {...}, 2.0), ('c', {...}, 3.0)),
    #     (('a.b', {...}, 3.0), ('b.c', {...}, 4.0))]
    completedMatrix = [[] for i in range(numExistingSprints)]
    for swarm in completedSwarms:
      completedMatrix[swarm[1]["sprintIdx"]].append(swarm)
    for sprint in completedMatrix:
      # Sort each sprint's swarms by best err score (tuple element 2)
      sprint.sort(key=itemgetter(2))

    # Form activeSwarms as a list of tuples, each tuple contains:
    #  (swarmName, swarmState, swarmBestErrScore)
    # Include all activeSwarms and completingSwarms
    # ex. activeSwarms:
    #    [('d', {...}, 1.4),
    #     ('e', {...}, 2.0),
    #     ('f', {...}, 3.0)]
    activeSwarms = self.getActiveSwarms()
    # Append the completing swarms
    activeSwarms.extend(self.getCompletingSwarms())
    activeSwarms = [(swarm, self._state["swarms"][swarm],
                     self._state["swarms"][swarm]["bestErrScore"]) \
                                                for swarm in activeSwarms]

    # Form the activeMatrix. Each row corresponds to a sprint. Each row
    #  contains the list of swarm tuples that belong to that sprint, sorted
    #  by best score. Each swarm tuple contains (swarmName, swarmState,
    #  swarmBestErrScore)
    # ex. activeMatrix:
    #    [(('d', {...}, 1.4), ('e', {...}, 2.0), ('f', {...}, 3.0)),
    #     (('d.e', {...}, 3.0), ('e.f', {...}, 4.0))]
    activeMatrix = [[] for i in range(numExistingSprints)]
    for swarm in activeSwarms:
      activeMatrix[swarm[1]["sprintIdx"]].append(swarm)
    for sprint in activeMatrix:
      sprint.sort(key=itemgetter(2))

    # Figure out which active swarms to kill
    toKill = []
    for i in range(1, numExistingSprints):
      for swarm in activeMatrix[i]:
        curSwarmEncoders = swarm[0].split(".")

        # If previous sprint is complete, get the best swarm and kill all active
        # sprints that are not supersets
        if(len(activeMatrix[i-1])==0):
          # If we are trying all possible 3 field combinations, don't kill any
          # off in sprint 2
          if i==2 and (self._hsObj._tryAll3FieldCombinations or \
                self._hsObj._tryAll3FieldCombinationsWTimestamps):
            pass
          else:
            # A swarm must contain every encoder of the previous sprint's
            # winner to remain viable; otherwise kill it.
            bestInPrevious = completedMatrix[i-1][0]
            bestEncoders = bestInPrevious[0].split('.')
            for encoder in bestEncoders:
              if not encoder in curSwarmEncoders:
                toKill.append(swarm)

        # if there are more than two completed encoders sets that are complete and
        # are worse than at least one active swarm in the previous sprint. Remove
        # any combinations that have any pair of them since they cannot have the best encoder.
        #elif(len(completedMatrix[i-1])>1):
        #  for completedSwarm in completedMatrix[i-1]:
        #    activeMatrix[i-1][0][2]<completed

    # Mark the bad swarms as killed
    if len(toKill) > 0:
      print "ParseMe: Killing encoders:" + str(toKill)

    for swarm in toKill:
      self.setSwarmState(swarm[0], "killed")

    return
  def isSprintActive(self, sprintIdx):
    """If the given sprint exists and is active, return active=True.

    If the sprint does not exist yet, this call will create it (and return
    active=True). If it already exists, but is completing or complete, return
    active=False.

    If sprintIdx is past the end of the possible sprints, return
      active=False, noMoreSprints=True

    IMPORTANT: When speculative particles are enabled, this call has some
    special processing to handle speculative sprints:

      * When creating a new speculative sprint (creating sprint N before
        sprint N-1 has completed), it initially only puts in only ONE swarm into
        the sprint.

      * Every time it is asked if sprint N is active, it also checks to see if
        it is time to add another swarm to the sprint, and adds a new swarm if
        appropriate before returning active=True

      * We decide it is time to add a new swarm to a speculative sprint when ALL
        of the currently active swarms in the sprint have all the workers they
        need (number of running (not mature) particles is _minParticlesPerSwarm).
        This means that we have capacity to run additional particles in a new
        swarm.

    It is expected that the sprints will be checked IN ORDER from 0 on up. (It
    is an error not to) The caller should always try to allocate from the first
    active sprint it finds. If it can't, then it can call this again to
    find/create the next active sprint.

    Parameters:
    ---------------------------------------------------------------------
    retval:   (active, noMoreSprints)
                active: True if the given sprint is active
                noMoreSprints: True if there are no more sprints possible
    """
    # Loop until we either return a decision or successfully write the new
    # sprint/swarm state to the DB (another worker may win the write race,
    # in which case we retry against the refreshed state).
    while True:
      numExistingSprints = len(self._state['sprints'])

      # If this sprint already exists, see if it is active
      if sprintIdx <= numExistingSprints-1:

        # With speculation off, it's simple, just return whether or not the
        # asked for sprint has active status
        if not self._hsObj._speculativeParticles:
          active = (self._state['sprints'][sprintIdx]['status'] == 'active')
          return (active, False)

        # With speculation on, if the sprint is still marked active, we also
        # need to see if it's time to add a new swarm to it.
        else:
          active = (self._state['sprints'][sprintIdx]['status'] == 'active')
          if not active:
            return (active, False)

          # See if all of the existing swarms are at capacity (have all the
          # workers they need):
          activeSwarmIds = self.getActiveSwarms(sprintIdx)
          swarmSizes = [self._hsObj._resultsDB.getParticleInfos(swarmId,
                              matured=False)[0] for swarmId in activeSwarmIds]
          notFullSwarms = [len(swarm) for swarm in swarmSizes \
                           if len(swarm) < self._hsObj._minParticlesPerSwarm]

          # If some swarms have room return that the swarm is active.
          if len(notFullSwarms) > 0:
            return (True, False)

          # If the existing swarms are at capacity, we will fall through to the
          # logic below which tries to add a new swarm to the sprint.

      # Stop creating new sprints?
      if self._state['lastGoodSprint'] is not None:
        return (False, True)

      # if fixedFields is set, we are running a fast swarm and only run sprint0
      if self._hsObj._fixedFields is not None:
        return (False, True)

      # ----------------------------------------------------------------------
      # Get the best model (if there is one) from the prior sprint. That gives
      # us the base encoder set for the next sprint. For sprint zero make sure
      # it does not take the last sprintidx because of wrapping.
      if sprintIdx > 0 \
            and self._state['sprints'][sprintIdx-1]['status'] == 'completed':
        (bestModelId, _) = self.bestModelInCompletedSprint(sprintIdx-1)
        (particleState, _, _, _, _) = self._hsObj._resultsDB.getParticleInfo(
                                                                  bestModelId)
        bestSwarmId = particleState['swarmId']
        baseEncoderSets = [bestSwarmId.split('.')]

      # If there is no best model yet, then use all encoder sets from the prior
      # sprint that were not killed
      else:
        bestSwarmId = None
        particleState = None
        # Build up more combinations, using ALL of the sets in the current
        # sprint.
        baseEncoderSets = []
        for swarmId in self.getNonKilledSwarms(sprintIdx-1):
          baseEncoderSets.append(swarmId.split('.'))

      # ----------------------------------------------------------------------
      # Which encoders should we add to the current base set?
      encoderAddSet = []

      # If we have constraints on how many fields we carry forward into
      # subsequent sprints (either nupic.hypersearch.max.field.branching or
      # nupic.hypersearch.min.field.contribution was set), then be more
      # picky about which fields we add in.
      limitFields = False
      if self._hsObj._maxBranching > 0 \
            or self._hsObj._minFieldContribution >= 0:
        if self._hsObj._searchType == HsSearchType.temporal or \
              self._hsObj._searchType == HsSearchType.classification:
          if sprintIdx >= 1:
            limitFields = True
            baseSprintIdx = 0
        elif self._hsObj._searchType == HsSearchType.legacyTemporal:
          if sprintIdx >= 2:
            limitFields = True
            baseSprintIdx = 1
        else:
          raise RuntimeError("Unimplemented search type %s" % \
                                  (self._hsObj._searchType))

      # Only add top _maxBranching encoders to the swarms?
      if limitFields:

        # Get field contributions to filter added fields
        pctFieldContributions, absFieldContributions = \
                                            self.getFieldContributions()
        toRemove = []
        self.logger.debug("FieldContributions min: %s" % \
                          (self._hsObj._minFieldContribution))
        for fieldname in pctFieldContributions:
          if pctFieldContributions[fieldname] < self._hsObj._minFieldContribution:
            self.logger.debug("FieldContributions removing: %s" % (fieldname))
            toRemove.append(self.getEncoderKeyFromName(fieldname))
          else:
            self.logger.debug("FieldContributions keeping: %s" % (fieldname))

        # Grab the top maxBranching base sprint swarms.
        swarms = self._state["swarms"]
        sprintSwarms = [(swarm, swarms[swarm]["bestErrScore"]) \
            for swarm in swarms if swarms[swarm]["sprintIdx"] == baseSprintIdx]
        sprintSwarms = sorted(sprintSwarms, key=itemgetter(1))
        if self._hsObj._maxBranching > 0:
          sprintSwarms = sprintSwarms[0:self._hsObj._maxBranching]

        # Create encoder set to generate further swarms.
        for swarm in sprintSwarms:
          swarmEncoders = swarm[0].split(".")
          for encoder in swarmEncoders:
            if not encoder in encoderAddSet:
              encoderAddSet.append(encoder)
        encoderAddSet = [encoder for encoder in encoderAddSet \
                         if not str(encoder) in toRemove]

      # If no limit on the branching or min contribution, simply use all of the
      # encoders.
      else:
        encoderAddSet = self._hsObj._encoderNames

      # -----------------------------------------------------------------------
      # Build up the new encoder combinations for the next sprint.
      newSwarmIds = set()

      # See if the caller wants to try more extensive field combinations with
      # 3 fields.
      if (self._hsObj._searchType == HsSearchType.temporal \
           or self._hsObj._searchType == HsSearchType.legacyTemporal) \
          and sprintIdx == 2 \
          and (self._hsObj._tryAll3FieldCombinations or \
               self._hsObj._tryAll3FieldCombinationsWTimestamps):

        if self._hsObj._tryAll3FieldCombinations:
          newEncoders = set(self._hsObj._encoderNames)
          if self._hsObj._predictedFieldEncoder in newEncoders:
            newEncoders.remove(self._hsObj._predictedFieldEncoder)
        else:
          # Just make sure the timestamp encoders are part of the mix
          newEncoders = set(encoderAddSet)
          if self._hsObj._predictedFieldEncoder in newEncoders:
            newEncoders.remove(self._hsObj._predictedFieldEncoder)
          for encoder in self._hsObj._encoderNames:
            if encoder.endswith('_timeOfDay') or encoder.endswith('_weekend') \
                or encoder.endswith('_dayOfWeek'):
              newEncoders.add(encoder)

        # Each candidate swarm is a 2-field combination plus the predicted
        # field's encoder; swarm ids are the sorted encoder names joined by '.'
        allCombos = list(itertools.combinations(newEncoders, 2))
        for combo in allCombos:
          newSet = list(combo)
          newSet.append(self._hsObj._predictedFieldEncoder)
          newSet.sort()
          newSwarmId = '.'.join(newSet)
          if newSwarmId not in self._state['swarms']:
            newSwarmIds.add(newSwarmId)

            # If a speculative sprint, only add the first encoder, if not add
            # all of them.
            if (len(self.getActiveSwarms(sprintIdx-1)) > 0):
              break

      # Else, we only build up by adding 1 new encoder to the best combination(s)
      # we've seen from the prior sprint
      else:
        for baseEncoderSet in baseEncoderSets:
          for encoder in encoderAddSet:
            if encoder not in self._state['blackListedEncoders'] \
                and encoder not in baseEncoderSet:
              newSet = list(baseEncoderSet)
              newSet.append(encoder)
              newSet.sort()
              newSwarmId = '.'.join(newSet)
              if newSwarmId not in self._state['swarms']:
                newSwarmIds.add(newSwarmId)

                # If a speculative sprint, only add the first encoder, if not add
                # all of them.
                if (len(self.getActiveSwarms(sprintIdx-1)) > 0):
                  break

      # ----------------------------------------------------------------------
      # Sort the new swarm Ids
      newSwarmIds = sorted(newSwarmIds)

      # If no more swarms can be found for this sprint...
      if len(newSwarmIds) == 0:
        # if sprint is not an empty sprint return that it is active but do not
        # add anything to it.
        if len(self.getAllSwarms(sprintIdx)) > 0:
          return (True, False)

        # If this is an empty sprint and we couldn't find any new swarms to
        # add (only bad fields are remaining), the search is over
        else:
          return (False, True)

      # Add this sprint and the swarms that are in it to our state
      self._dirty = True

      # Add in the new sprint if necessary
      if len(self._state["sprints"]) == sprintIdx:
        self._state['sprints'].append({'status': 'active',
                                       'bestModelId': None,
                                       'bestErrScore': None})

      # Add in the new swarm(s) to the sprint
      for swarmId in newSwarmIds:
        self._state['swarms'][swarmId] = {'status': 'active',
                                          'bestModelId': None,
                                          'bestErrScore': None,
                                          'sprintIdx': sprintIdx}

      # Update the list of active swarms
      self._state['activeSwarms'] = self.getActiveSwarms()

      # Try to set new state
      success = self.writeStateToDB()

      # Return result if successful
      if success:
        return (True, False)

      # No success, loop back with the updated state and try again
class HsSearchType(object):
  """Enumerates the types of search we can perform.

  The values are plain strings, compared against `_searchType` throughout
  the hypersearch logic.
  """
  # Temporal search
  temporal = 'temporal'
  # Legacy temporal search
  legacyTemporal = 'legacyTemporal'
  # Classification search
  classification = 'classification'
class HypersearchV2(object):
"""The v2 Hypersearch implementation. This is one example of a Hypersearch
implementation that can be used by the HypersearchWorker. Other implementations
just have to implement the following methods:
createModels()
recordModelProgress()
getPermutationVariables()
getComplexVariableLabelLookupDict()
This implementation uses a hybrid of Particle Swarm Optimization (PSO) and
the old "ronamatic" logic from Hypersearch V1. Variables which are lists of
choices (i.e. string values, integer values that represent different
categories) are searched using the ronamatic logic whereas floats and
integers that represent a range of values are searched using PSO.
For prediction experiments, this implementation starts out evaluating only
single encoder models that encode the predicted field. This is the first
"sprint". Once it finds the optimum set of variables for that, it starts to
build up by adding in combinations of 2 fields (the second "sprint"), where
one of them is the predicted field. Once the top 2-field combination(s) are
discovered, it starts to build up on those by adding in a 3rd field, etc.
Each new set of field combinations is called a sprint.
For classification experiments, this implementation starts out evaluating two
encoder models, where one of the encoders is the classified field. This is the
first "sprint". Once it finds the optimum set of variables for that, it starts
to build up by evauating combinations of 3 fields (the second "sprint"), where
two of them are the best 2 fields found in the first sprint (one of those of
course being the classified field). Once the top 3-field combination(s) are
discovered, it starts to build up on those by adding in a 4th field, etc.
In classification models, the classified field, although it has an encoder, is
not sent "into" the network. Rather, the encoded value just goes directly to
the classifier as the classifier input.
At any one time, there are 1 or more swarms being evaluated at the same time -
each swarm representing a certain field combination within the sprint. We try
to load balance the swarms and have the same number of models evaluated for
each swarm at any one time. Each swarm contains N particles, and we also try
to keep N >= some mininum number. Each position of a particle corresponds to a
model.
When a worker is ready to evaluate a new model, it first picks the swarm with
the least number of models so far (least number of evaluated particle
positions). If that swarm does not have the min number of particles in it yet,
or does not yet have a particle created by this worker, the worker will create
a new particle, else it will choose another particle from that swarm that it
had created in the past which has the least number of evaluated positions so
far.
"""
def __init__(self, searchParams, workerID=None, cjDAO=None, jobID=None,
logLevel=None):
"""Instantiate the HyperseachV2 instance.
Parameters:
----------------------------------------------------------------------
searchParams: a dict of the job's search parameters. The format is:
persistentJobGUID: REQUIRED.
Persistent, globally-unique identifier for this job
for use in constructing persistent model checkpoint
keys. MUST be compatible with S3 key-naming rules, but
MUST NOT contain forward slashes. This GUID is
expected to retain its global uniqueness across
clusters and cluster software updates (unlike the
record IDs in the Engine's jobs table, which recycle
upon table schema change and software update). In the
future, this may also be instrumental for checkpoint
garbage collection.
permutationsPyFilename:
OPTIONAL - path to permutations.py file
permutationsPyContents:
OPTIONAL - JSON encoded string with
contents of permutations.py file
descriptionPyContents:
OPTIONAL - JSON encoded string with
contents of base description.py file
description: OPTIONAL - JSON description of the search
createCheckpoints: OPTIONAL - Whether to create checkpoints
useTerminators OPTIONAL - True of False (default config.xml). When set
to False, the model and swarm terminators
are disabled
maxModels: OPTIONAL - max # of models to generate
NOTE: This is a deprecated location for this
setting. Now, it should be specified through
the maxModels variable within the permutations
file, or maxModels in the JSON description
dummyModel: OPTIONAL - Either (True/False) or a dict of parameters
for a dummy model. If this key is absent,
a real model is trained.
See utils.py/OPFDummyModel runner for the
schema of the dummy parameters
speculativeParticles OPTIONAL - True or False (default obtained from
nupic.hypersearch.speculative.particles.default
configuration property). See note below.
NOTE: The caller must provide just ONE of the following to describe the
hypersearch:
1.) permutationsPyFilename
OR 2.) permutationsPyContents & permutationsPyContents
OR 3.) description
The schema for the description element can be found at:
"py/nupic/frameworks/opf/expGenerator/experimentDescriptionSchema.json"
NOTE about speculativeParticles: If true (not 0), hypersearch workers will
go ahead and create and run particles in subsequent sprints and
generations before the current generation or sprint has been completed. If
false, a worker will wait in a sleep loop until the current generation or
sprint has finished before choosing the next particle position or going
into the next sprint. When true, the best model can be found faster, but
results are less repeatable due to the randomness of when each worker
completes each particle. This property can be overridden via the
speculativeParticles element of the Hypersearch job params.
workerID: our unique Hypersearch worker ID
cjDAO: ClientJobsDB Data Access Object
jobID: job ID for this hypersearch job
logLevel: override logging level to this value, if not None
"""
# Instantiate our logger
self.logger = logging.getLogger(".".join( ['com.numenta',
self.__class__.__module__, self.__class__.__name__]))
# Override log level?
if logLevel is not None:
self.logger.setLevel(logLevel)
# This is how to check the logging level
#if self.logger.getEffectiveLevel() <= logging.DEBUG:
# print "at debug level"
# Init random seed
random.seed(42)
# Save the search info
self._searchParams = searchParams
self._workerID = workerID
self._cjDAO = cjDAO
self._jobID = jobID
# Log search params
self.logger.info("searchParams: \n%s" % (pprint.pformat(
clippedObj(searchParams))))
self._createCheckpoints = self._searchParams.get('createCheckpoints',
False)
self._maxModels = self._searchParams.get('maxModels', None)
if self._maxModels == -1:
self._maxModels = None
self._predictionCacheMaxRecords = self._searchParams.get('predictionCacheMaxRecords', None)
# Speculative particles?
self._speculativeParticles = self._searchParams.get('speculativeParticles',
bool(int(Configuration.get(
'nupic.hypersearch.speculative.particles.default'))))
self._speculativeWaitSecondsMax = float(Configuration.get(
'nupic.hypersearch.speculative.particles.sleepSecondsMax'))
# Maximum Field Branching
self._maxBranching= int(Configuration.get(
'nupic.hypersearch.max.field.branching'))
# Minimum Field Contribution
self._minFieldContribution= float(Configuration.get(
'nupic.hypersearch.min.field.contribution'))
# This gets set if we detect that the job got cancelled
self._jobCancelled = False
# Use terminators (typically set by GrokRunPermuations.py)
if 'useTerminators' in self._searchParams:
useTerminators = self._searchParams['useTerminators']
useTerminators = str(int(useTerminators))
Configuration.set('nupic.hypersearch.enableModelTermination', useTerminators)
Configuration.set('nupic.hypersearch.enableModelMaturity', useTerminators)
Configuration.set('nupic.hypersearch.enableSwarmTermination', useTerminators)
# Special test mode?
if 'NTA_TEST_exitAfterNModels' in os.environ:
self._maxModels = int(os.environ['NTA_TEST_exitAfterNModels'])
self._dummyModel = self._searchParams.get('dummyModel', None)
# Holder for temporary directory, if any, that needs to be cleaned up
# in our close() method.
self._tempDir = None
try:
# Get the permutations info. This can be either:
# 1.) JSON encoded search description (this will be used to generate a
# permutations.py and description.py files using ExpGenerator)
# 2.) path to a pre-generated permutations.py file. The description.py is
# assumed to be in the same directory
# 3.) contents of the permutations.py and descrption.py files.
if 'description' in self._searchParams:
if ('permutationsPyFilename' in self._searchParams or
'permutationsPyContents' in self._searchParams or
'descriptionPyContents' in self._searchParams):
raise RuntimeError(
"Either 'description', 'permutationsPyFilename' or"
"'permutationsPyContents' & 'permutationsPyContents' should be "
"specified, but not two or more of these at once.")
# Calculate training period for anomaly models
searchParamObj = self._searchParams
anomalyParams = searchParamObj['description'].get('anomalyParams',
dict())
# This is used in case searchParamObj['description']['anomalyParams']
# is set to None.
if anomalyParams is None:
anomalyParams = dict()
if (('autoDetectWaitRecords' not in anomalyParams) or
(anomalyParams['autoDetectWaitRecords'] is None)):
streamDef = self._getStreamDef(searchParamObj['description'])
from nupic.data.stream_reader import StreamReader
try:
streamReader = StreamReader(streamDef, isBlocking=False,
maxTimeout=0, eofOnTimeout=True)
anomalyParams['autoDetectWaitRecords'] = \
streamReader.getDataRowCount()
except Exception:
anomalyParams['autoDetectWaitRecords'] = None
self._searchParams['description']['anomalyParams'] = anomalyParams
# Call the experiment generator to generate the permutations and base
# description file.
outDir = self._tempDir = tempfile.mkdtemp()
expGenerator([
'--description=%s' % (
json.dumps(self._searchParams['description'])),
'--version=v2',
'--outDir=%s' % (outDir)])
# Get the name of the permutations script.
permutationsScript = os.path.join(outDir, 'permutations.py')
elif 'permutationsPyFilename' in self._searchParams:
if ('description' in self._searchParams or
'permutationsPyContents' in self._searchParams or
'descriptionPyContents' in self._searchParams):
raise RuntimeError(
"Either 'description', 'permutationsPyFilename' or "
"'permutationsPyContents' & 'permutationsPyContents' should be "
"specified, but not two or more of these at once.")
permutationsScript = self._searchParams['permutationsPyFilename']
elif 'permutationsPyContents' in self._searchParams:
if ('description' in self._searchParams or
'permutationsPyFilename' in self._searchParams):
raise RuntimeError(
"Either 'description', 'permutationsPyFilename' or"
"'permutationsPyContents' & 'permutationsPyContents' should be "
"specified, but not two or more of these at once.")
assert ('descriptionPyContents' in self._searchParams)
# Generate the permutations.py and description.py files
outDir = self._tempDir = tempfile.mkdtemp()
permutationsScript = os.path.join(outDir, 'permutations.py')
fd = open(permutationsScript, 'w')
fd.write(self._searchParams['permutationsPyContents'])
fd.close()
fd = open(os.path.join(outDir, 'description.py'), 'w')
fd.write(self._searchParams['descriptionPyContents'])
fd.close()
else:
raise RuntimeError ("Either 'description' or 'permutationsScript' must be"
"specified")
# Get the base path of the experiment and read in the base description
self._basePath = os.path.dirname(permutationsScript)
self._baseDescription = open(os.path.join(self._basePath,
'description.py')).read()
self._baseDescriptionHash = hashlib.md5(self._baseDescription).digest()
# Read the model config to figure out the inference type
modelDescription, _ = opfhelpers.loadExperiment(self._basePath)
# Read info from permutations file. This sets up the following member
# variables:
# _predictedField
# _permutations
# _flattenedPermutations
# _encoderNames
# _reportKeys
# _filterFunc
# _optimizeKey
# _maximize
# _dummyModelParamsFunc
self._readPermutationsFile(permutationsScript, modelDescription)
# Fill in and save the base description and permutations file contents
# if they haven't already been filled in by another worker
if self._cjDAO is not None:
updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
fieldName='genBaseDescription',
curValue=None,
newValue = self._baseDescription)
if updated:
permContents = open(permutationsScript).read()
self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
fieldName='genPermutations',
curValue=None,
newValue = permContents)
# if user provided an artificialMetric, force use of the dummy model
if self._dummyModelParamsFunc is not None:
if self._dummyModel is None:
self._dummyModel = dict()
# If at DEBUG log level, print out permutations info to the log
if self.logger.getEffectiveLevel() <= logging.DEBUG:
msg = StringIO.StringIO()
print >> msg, "Permutations file specifications: "
info = dict()
for key in ['_predictedField', '_permutations',
'_flattenedPermutations', '_encoderNames',
'_reportKeys', '_optimizeKey', '_maximize']:
info[key] = getattr(self, key)
print >> msg, pprint.pformat(info)
self.logger.debug(msg.getvalue())
msg.close()
# Instantiate our database to hold the results we received so far
self._resultsDB = ResultsDB(self)
# Instantiate the Swarm Terminator
self._swarmTerminator = SwarmTerminator()
# Initial hypersearch state
self._hsState = None
# The Max # of attempts we will make to create a unique model before
# giving up.
self._maxUniqueModelAttempts = int(Configuration.get(
'nupic.hypersearch.maxUniqueModelAttempts'))
# The max amount of time allowed before a model is considered orphaned.
self._modelOrphanIntervalSecs = float(Configuration.get(
'nupic.hypersearch.modelOrphanIntervalSecs'))
# The max percent of models that can complete with errors
self._maxPctErrModels = float(Configuration.get(
'nupic.hypersearch.maxPctErrModels'))
except:
# Clean up our temporary directory, if any
if self._tempDir is not None:
shutil.rmtree(self._tempDir)
self._tempDir = None
raise
return
def _getStreamDef(self, modelDescription):
"""
Generate stream definition based on
"""
#--------------------------------------------------------------------------
# Generate the string containing the aggregation settings.
aggregationPeriod = {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
}
# Honor any overrides provided in the stream definition
aggFunctionsDict = {}
if 'aggregation' in modelDescription['streamDef']:
for key in aggregationPeriod.keys():
if key in modelDescription['streamDef']['aggregation']:
aggregationPeriod[key] = modelDescription['streamDef']['aggregation'][key]
if 'fields' in modelDescription['streamDef']['aggregation']:
for (fieldName, func) in modelDescription['streamDef']['aggregation']['fields']:
aggFunctionsDict[fieldName] = str(func)
# Do we have any aggregation at all?
hasAggregation = False
for v in aggregationPeriod.values():
if v != 0:
hasAggregation = True
break
# Convert the aggFunctionsDict to a list
aggFunctionList = aggFunctionsDict.items()
aggregationInfo = dict(aggregationPeriod)
aggregationInfo['fields'] = aggFunctionList
streamDef = copy.deepcopy(modelDescription['streamDef'])
streamDef['aggregation'] = copy.deepcopy(aggregationInfo)
return streamDef
def __del__(self):
"""Destructor; NOTE: this is not guaranteed to be called (bugs like
circular references could prevent it from being called).
"""
self.close()
return
def close(self):
    """Delete temporary system objects/files created by this instance.

    Safe to call repeatedly: after the first call self._tempDir is None and
    subsequent calls are no-ops.
    """
    tempDir = self._tempDir
    if tempDir is not None and os.path.isdir(tempDir):
        self.logger.debug("Removing temporary directory %r", tempDir)
        shutil.rmtree(tempDir)
    # Always clear the reference, even if the directory was already gone.
    self._tempDir = None
def _readPermutationsFile(self, filename, modelDescription):
    """
    Read the permutations file and initialize the following member variables:
        _predictedField: field name of the field we are trying to
          predict
        _permutations: Dict containing the full permutations dictionary.
        _flattenedPermutations: Dict containing the flattened version of
          _permutations. The keys leading to the value in the dict are joined
          with a period to create the new key and permute variables within
          encoders are pulled out of the encoder.
        _encoderNames: keys from self._permutations of only the encoder
          variables.
        _reportKeys: The 'report' list from the permutations file.
          This is a list of the items from each experiment's pickled
          results file that should be included in the final report. The
          format of each item is a string of key names separated by colons,
          each key being one level deeper into the experiment results
          dict. For example, 'key1:key2'.
        _filterFunc: a user-supplied function that can be used to
          filter out specific permutation combinations.
        _optimizeKey: which report key to optimize for
        _maximize: True if we should try and maximize the optimizeKey
          metric. False if we should minimize it.
        _dummyModelParamsFunc: a user-supplied function that can be used to
          artificially generate CLA model results. When supplied,
          the model is not actually run through the OPF, but instead is run
          through a "Dummy Model" (nupic.swarming.ModelRunner.
          OPFDummyModelRunner). This function returns the params dict used
          to control various options in the dummy model (the returned metric,
          the execution time, etc.). This is used for hypersearch algorithm
          development.

    Parameters:
    ---------------------------------------------------------
    filename:          Name of permutations file
    modelDescription:  dict description of the model (used for the
                       inference type and encoder list)
    retval:            None
    """
    # Open and execute the permutations file; the variables it defines are
    # collected into permVars. (Python 2 execfile; renamed from 'vars' to
    # avoid shadowing the builtin.)
    permVars = {}
    execfile(filename, globals(), permVars)

    # Read in misc info.
    self._reportKeys = permVars.get('report', [])
    self._filterFunc = permVars.get('permutationFilter', None)
    self._dummyModelParamsFunc = permVars.get('dummyModelParams', None)
    self._predictedField = None   # default
    self._predictedFieldEncoder = None   # default
    self._fixedFields = None   # default

    # The fastSwarm variable, if present, contains the params from a best
    # model from a previous swarm. If present, use info from that to seed
    # a fast swarm
    self._fastSwarmModelParams = permVars.get('fastSwarmModelParams', None)
    if self._fastSwarmModelParams is not None:
        encoders = self._fastSwarmModelParams['structuredParams']['modelParams']\
            ['sensorParams']['encoders']
        self._fixedFields = []
        for fieldName in encoders:
            if encoders[fieldName] is not None:
                self._fixedFields.append(fieldName)

    if 'fixedFields' in permVars:
        self._fixedFields = permVars['fixedFields']

    # Get min number of particles per swarm from either permutations file or
    # config.
    self._minParticlesPerSwarm = permVars.get('minParticlesPerSwarm')
    if self._minParticlesPerSwarm is None:
        self._minParticlesPerSwarm = Configuration.get(
            'nupic.hypersearch.minParticlesPerSwarm')
    self._minParticlesPerSwarm = int(self._minParticlesPerSwarm)

    # Enable logic to kill off speculative swarms when an earlier sprint
    # has found that it contains poorly performing field combination?
    self._killUselessSwarms = permVars.get('killUselessSwarms', True)

    # The caller can request that the predicted field ALWAYS be included ("yes")
    # or optionally include ("auto"). The setting of "no" is N/A and ignored
    # because in that case the encoder for the predicted field will not even
    # be present in the permutations file.
    # When set to "yes", this will force the first sprint to try the predicted
    # field only (the legacy mode of swarming).
    # When set to "auto", the first sprint tries all possible fields (one at a
    # time) in the first sprint.
    self._inputPredictedField = permVars.get("inputPredictedField", "yes")

    # Try all possible 3-field combinations? Normally, we start with the best
    # 2-field combination as a base. When this flag is set though, we try
    # all possible 3-field combinations which takes longer but can find a
    # better model.
    self._tryAll3FieldCombinations = permVars.get('tryAll3FieldCombinations',
                                                  False)

    # Always include timestamp fields in the 3-field swarms?
    # This is a less compute intensive version of tryAll3FieldCombinations.
    # Instead of trying ALL possible 3 field combinations, it just insures
    # that the timestamp fields (dayOfWeek, timeOfDay, weekend) are never left
    # out when generating the 3-field swarms.
    self._tryAll3FieldCombinationsWTimestamps = permVars.get(
        'tryAll3FieldCombinationsWTimestamps', False)

    # Allow the permutations file to override minFieldContribution. This would
    # be set to a negative number for large swarms so that you don't disqualify
    # a field in an early sprint just because it did poorly there. Sometimes,
    # a field that did poorly in an early sprint could help accuracy when
    # added in a later sprint
    minFieldContribution = permVars.get('minFieldContribution', None)
    if minFieldContribution is not None:
        self._minFieldContribution = minFieldContribution

    # Allow the permutations file to override maxBranching.
    maxBranching = permVars.get('maxFieldBranching', None)
    if maxBranching is not None:
        self._maxBranching = maxBranching

    # Read in the optimization info.
    if 'maximize' in permVars:
        self._optimizeKey = permVars['maximize']
        self._maximize = True
    elif 'minimize' in permVars:
        self._optimizeKey = permVars['minimize']
        self._maximize = False
    else:
        # BUGFIX: the original message had a '%s' placeholder but never
        # supplied the filename argument.
        raise RuntimeError("Permutations file '%s' does not include a maximize"
                           " or minimize metric." % filename)

    # The permutations file is the new location for maxModels. The old
    # location, in the jobParams, is deprecated.
    maxModels = permVars.get('maxModels')
    if maxModels is not None:
        if self._maxModels is None:
            self._maxModels = maxModels
        else:
            raise RuntimeError('It is an error to specify maxModels both in the job'
                               ' params AND in the permutations file.')

    # Figure out what kind of search this is:
    #
    #  If it's a temporal prediction search:
    #    the first sprint has 1 swarm, with just the predicted field
    #  elif it's a spatial prediction search:
    #    the first sprint has N swarms, each with predicted field + one
    #    other field.
    #  elif it's a classification search:
    #    the first sprint has N swarms, each with 1 field
    inferenceType = modelDescription['modelParams']['inferenceType']
    if not InferenceType.validate(inferenceType):
        raise ValueError("Invalid inference type %s" % inferenceType)

    if inferenceType in [InferenceType.TemporalMultiStep,
                         InferenceType.NontemporalMultiStep]:
        # If it does not have a separate encoder for the predicted field that
        # goes to the classifier, it is a legacy multi-step network
        classifierOnlyEncoder = None
        for encoder in modelDescription["modelParams"]["sensorParams"]\
                ["encoders"].values():
            if encoder.get("classifierOnly", False) \
                    and encoder["fieldname"] == permVars.get('predictedField', None):
                classifierOnlyEncoder = encoder
                break

        if classifierOnlyEncoder is None or self._inputPredictedField == "yes":
            # If we don't have a separate encoder for the classifier (legacy
            # MultiStep) or the caller explicitly wants to include the predicted
            # field, then use the legacy temporal search methodology.
            self._searchType = HsSearchType.legacyTemporal
        else:
            self._searchType = HsSearchType.temporal

    elif inferenceType in [InferenceType.TemporalNextStep,
                           InferenceType.TemporalAnomaly]:
        self._searchType = HsSearchType.legacyTemporal
    elif inferenceType in (InferenceType.TemporalClassification,
                           InferenceType.NontemporalClassification):
        self._searchType = HsSearchType.classification
    else:
        raise RuntimeError("Unsupported inference type: %s" % inferenceType)

    # Get the predicted field. Note that even classification experiments
    # have a "predicted" field - which is the field that contains the
    # classification value.
    self._predictedField = permVars.get('predictedField', None)
    if self._predictedField is None:
        raise RuntimeError("Permutations file '%s' does not have the required"
                           " 'predictedField' variable" % filename)

    # Read in and validate the permutations dict
    if 'permutations' not in permVars:
        raise RuntimeError("Permutations file '%s' does not define permutations"
                           % filename)

    if not isinstance(permVars['permutations'], dict):
        # BUGFIX: supply the filename argument for the '%s' placeholder.
        raise RuntimeError("Permutations file '%s' defines a permutations "
                           "variable but it is not a dict" % filename)

    self._encoderNames = []
    self._permutations = permVars['permutations']
    self._flattenedPermutations = dict()

    def _flattenPermutations(value, keys):
        # Callback for dictutils.rApply: flatten nested permutation variables
        # into self._flattenedPermutations, keyed by the period-joined path.
        if ':' in keys[-1]:
            # BUGFIX: supply the variable name for the '%s' placeholder.
            raise RuntimeError("The permutation variable '%s' contains a ':' "
                               "character, which is not allowed." % keys[-1])
        flatKey = _flattenKeys(keys)
        if isinstance(value, PermuteEncoder):
            self._encoderNames.append(flatKey)

            # If this is the encoder for the predicted field, save its name.
            if value.fieldName == self._predictedField:
                self._predictedFieldEncoder = flatKey

            # Store the flattened representations of the variables within the
            # encoder.
            for encKey, encValue in value.kwArgs.iteritems():
                if isinstance(encValue, PermuteVariable):
                    self._flattenedPermutations['%s:%s' % (flatKey, encKey)] = \
                        encValue
        elif isinstance(value, PermuteVariable):
            self._flattenedPermutations[flatKey] = value
        else:
            # Defensive branch: a PermuteVariable can't reach here because of
            # the elif above, but keep the guard for safety.
            # BUGFIX: the original referenced the undefined name 'key'
            # (NameError); the flattened key is flatKey.
            if isinstance(value, PermuteVariable):
                self._flattenedPermutations[flatKey] = value

    dictutils.rApply(self._permutations, _flattenPermutations)
def getExpectedNumModels(self):
    """Report how many models this HyperSearch is expected to complete.

    Computing this exactly is compute-intensive for searches with a huge
    number of permutation combinations, so this implementation simply
    reports "unknown".

    NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: exposed for the benefit of
    Grok RunPermutations (RP) for use in progress reporting.

    Parameters:
    ---------------------------------------------------------
    retval: The total number of expected models, if known; -1 if unknown
    """
    # Not computed for this search type.
    return -1
def getModelNames(self):
    """Produce the list of model names expected to complete in this search.

    Enumerating the names is compute-intensive for searches with a huge
    number of permutation combinations, so this implementation reports
    "not applicable".

    NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: exposed for the benefit of
    Grok RunPermutations (RP).

    Parameters:
    ---------------------------------------------------------
    retval: List of model names for this HypersearchV2 instance, or
            None if not applicable
    """
    # Not enumerable for this search type.
    return None
def getPermutationVariables(self):
    """Return the flattened permutation-variable dictionary.

    Parameters:
    ---------------------------------------------------------
    retval: dict mapping flat permutation variable names to
            PermuteVariable sub-class instances, as built by
            _readPermutationsFile().
    """
    return self._flattenedPermutations
def getComplexVariableLabelLookupDict(self):
    """Build a lookup for permutation variables whose values are too complex
    for labels, so artificial labels (e.g., "Choice0", "Choice1", ...) must
    be generated for them.

    Parameters:
    ---------------------------------------------------------
    retval: dict keyed by the complex variable's name; each value is
            dict(labels=<list_of_labels>, values=<list_of_values>).

    Raises:
        NotImplementedError: always — this search type does not support
        complex-variable label lookup.
    """
    raise NotImplementedError
def getOptimizationMetricInfo(self):
    """Return which report key is being optimized and in which direction.

    Parameters:
    ---------------------------------------------------------
    retval: (optimizationMetricKey, maximize)
              optimizationMetricKey: which report key to optimize for
              maximize: True to maximize the metric, False to minimize it
    """
    return self._optimizeKey, self._maximize
def _checkForOrphanedModels (self):
    """If there are any models that haven't been updated in a while, consider
    them dead, and mark them as hidden in our resultsDB. We also change the
    paramsHash and particleHash of orphaned models so that we can
    re-generate that particle and/or model again if we desire.

    Parameters:
    ----------------------------------------------------------------------
    retval:       None
    """
    self.logger.debug("Checking for orphaned models older than %s" % \
                     (self._modelOrphanIntervalSecs))

    # Keep adopting orphans until the DAO reports there are none left.
    while True:
        orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID,
                                            self._modelOrphanIntervalSecs)
        if orphanedModelId is None:
            return

        self.logger.info("Removing orphaned model: %d" % (orphanedModelId))

        # Change the model hash and params hash as stored in the models table so
        # that we can insert a new model with the same paramsHash.
        # The hashes are salted with an attempt counter; retry up to 100 times
        # in case a generated hash pair is already taken.
        for attempt in range(100):
            paramsHash = hashlib.md5("OrphanParams.%d.%d" % (orphanedModelId,
                                                             attempt)).digest()
            particleHash = hashlib.md5("OrphanParticle.%d.%d" % (orphanedModelId,
                                                                 attempt)).digest()
            try:
                self._cjDAO.modelSetFields(orphanedModelId,
                                         dict(engParamsHash=paramsHash,
                                              engParticleHash=particleHash))
                success = True
            except:
                # NOTE(review): bare except — presumably guards against a
                # duplicate-key error from the DAO on the unique hash
                # columns (confirm); it also swallows any other exception
                # and simply retries with a new salt.
                success = False
            if success:
                break

        if not success:
            raise RuntimeError("Unexpected failure to change paramsHash and "
                               "particleHash of orphaned model")

        # Mark this model as complete, with reason "orphaned"
        self._cjDAO.modelSetCompleted(modelID=orphanedModelId,
                    completionReason=ClientJobsDAO.CMPL_REASON_ORPHAN,
                    completionMsg="Orphaned")

        # Update our results DB immediately, rather than wait for the worker
        # to inform us. This insures that the getParticleInfos() calls we make
        # below don't include this particle. Setting the metricResult to None
        # sets it to worst case
        self._resultsDB.update(modelID=orphanedModelId,
                               modelParams=None,
                               modelParamsHash=paramsHash,
                               metricResult=None,
                               completed = True,
                               completionReason = ClientJobsDAO.CMPL_REASON_ORPHAN,
                               matured = True,
                               numRecords = 0)
def _hsStatePeriodicUpdate(self, exhaustedSwarmId=None):
    """
    Periodically, check to see if we should remove a certain field combination
    from evaluation (because it is doing so poorly) or move on to the next
    sprint (add in more fields).

    This method is called from _getCandidateParticleAndSwarm(), which is called
    right before we try and create a new model to run.

    Parameters:
    -----------------------------------------------------------------------
    exhaustedSwarmId: If not None, force a change to the current set of active
                      swarms by removing this swarm. This is used in situations
                      where we can't find any new unique models to create in
                      this swarm. In these situations, we update the hypersearch
                      state regardless of the timestamp of the last time another
                      worker updated it.
    """
    if self._hsState is None:
        self._hsState = HsState(self)

    # Read in current state from the DB
    self._hsState.readStateFromDB()

    # This will hold the list of completed swarms that we find
    completedSwarms = set()

    # Mark the exhausted swarm as completing/completed, if any
    if exhaustedSwarmId is not None:
        self.logger.info("Removing swarm %s from the active set "
                         "because we can't find any new unique particle "
                         "positions" % (exhaustedSwarmId))
        # Is it completing or completed? "completing" means models are still
        # being evaluated in it; "completed" means all are done.
        (particles, _, _, _, _) = self._resultsDB.getParticleInfos(
            swarmId=exhaustedSwarmId, matured=False)
        if len(particles) > 0:
            exhaustedSwarmStatus = 'completing'
        else:
            exhaustedSwarmStatus = 'completed'

    # Kill all swarms that don't need to be explored based on the most recent
    # information.
    if self._killUselessSwarms:
        self._hsState.killUselessSwarms()

    # For all swarms that were in the 'completing' state, see if they have
    # completed yet.
    #
    # Note that we are not quite sure why this doesn't automatically get handled
    # when we receive notification that a model finally completed in a swarm.
    # But, we ARE running into a situation, when speculativeParticles is off,
    # where we have one or more swarms in the 'completing' state even though all
    # models have since finished. This logic will serve as a failsafe against
    # this situation.
    completingSwarms = self._hsState.getCompletingSwarms()
    for swarmId in completingSwarms:
        # Is it completed?
        (particles, _, _, _, _) = self._resultsDB.getParticleInfos(
            swarmId=swarmId, matured=False)
        if len(particles) == 0:
            completedSwarms.add(swarmId)

    # Are there any swarms we can remove (because they have matured)?
    completedSwarmGens = self._resultsDB.getMaturedSwarmGenerations()
    priorCompletedSwarms = self._hsState.getCompletedSwarms()
    for (swarmId, genIdx, errScore) in completedSwarmGens:
        # Don't need to report it if the swarm already completed
        if swarmId in priorCompletedSwarms:
            continue

        completedList = self._swarmTerminator.recordDataPoint(
            swarmId=swarmId, generation=genIdx, errScore=errScore)

        # Update status message
        statusMsg = "Completed generation #%d of swarm '%s' with a best" \
                    " errScore of %g" % (genIdx, swarmId, errScore)
        if len(completedList) > 0:
            statusMsg = "%s. Matured swarm(s): %s" % (statusMsg, completedList)
        self.logger.info(statusMsg)
        self._cjDAO.jobSetFields (jobID=self._jobID,
                                  fields=dict(engStatus=statusMsg),
                                  useConnectionID=False,
                                  ignoreUnchanged=True)

        # Special test mode to check which swarms have terminated.
        # Loops on a compare-and-swap of the job's 'results' field so that
        # concurrent workers don't clobber each other's updates.
        if 'NTA_TEST_recordSwarmTerminations' in os.environ:
            while True:
                resultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
                if resultsStr is None:
                    results = {}
                else:
                    results = json.loads(resultsStr)
                if not 'terminatedSwarms' in results:
                    results['terminatedSwarms'] = {}
                for swarm in completedList:
                    if swarm not in results['terminatedSwarms']:
                        results['terminatedSwarms'][swarm] = (genIdx,
                                        self._swarmTerminator.swarmScores[swarm])

                newResultsStr = json.dumps(results)
                if newResultsStr == resultsStr:
                    break
                updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
                                                         fieldName='results',
                                                         curValue=resultsStr,
                                                         newValue = json.dumps(results))
                if updated:
                    break

        if len(completedList) > 0:
            for name in completedList:
                self.logger.info("Swarm matured: %s. Score at generation %d: "
                                 "%s" % (name, genIdx, errScore))
            completedSwarms = completedSwarms.union(completedList)

    # Nothing to change? Then skip the state write entirely.
    if len(completedSwarms)==0 and (exhaustedSwarmId is None):
        return

    # We need to mark one or more swarms as completed, keep trying until
    # successful, or until some other worker does it for us.
    while True:

        if exhaustedSwarmId is not None:
            self._hsState.setSwarmState(exhaustedSwarmId, exhaustedSwarmStatus)

        # Mark the completed swarms as completed
        for swarmId in completedSwarms:
            self._hsState.setSwarmState(swarmId, 'completed')

        # If nothing changed, we're done
        if not self._hsState.isDirty():
            return

        # Update the shared Hypersearch state now
        # This will do nothing and return False if some other worker beat us to it
        success = self._hsState.writeStateToDB()

        if success:
            # Go through and cancel all models that are still running, except for
            # the best model. Once the best model changes, the one that used to be
            # best (and has matured) will notice that and stop itself at that point.
            jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
            if jobResultsStr is not None:
                jobResults = json.loads(jobResultsStr)
                bestModelId = jobResults.get('bestModel', None)
            else:
                bestModelId = None

            for swarmId in list(completedSwarms):
                (_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(
                    swarmId=swarmId, completed=False)
                if bestModelId in modelIds:
                    modelIds.remove(bestModelId)
                if len(modelIds) == 0:
                    continue
                self.logger.info("Killing the following models in swarm '%s' because"
                                 "the swarm is being terminated: %s" % (swarmId,
                                                                        str(modelIds)))

                for modelId in modelIds:
                    self._cjDAO.modelSetFields(modelId,
                            dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),
                            ignoreUnchanged = True)
            return

        # We were not able to change the state because some other worker beat us
        # to it.
        # Get the new state, and try again to apply our changes.
        self._hsState.readStateFromDB()
        self.logger.debug("New hsState has been set by some other worker to: "
                          " \n%s" % (pprint.pformat(self._hsState._state, indent=4)))
def _getCandidateParticleAndSwarm (self, exhaustedSwarmId=None):
    """Find or create a candidate particle to produce a new model.

    At any one time, there is an active set of swarms in the current sprint, where
    each swarm in the sprint represents a particular combination of fields.
    Ideally, we should try to balance the number of models we have evaluated for
    each swarm at any time.

    This method will see how many models have been evaluated for each active
    swarm in the current active sprint(s) and then try and choose a particle
    from the least represented swarm in the first possible active sprint, with
    the following constraints/rules:

    for each active sprint:
        for each active swarm (preference to those with least# of models so far):
            1.) The particle will be created from new (generation #0) if there are not
            already self._minParticlesPerSwarm particles in the swarm.

            2.) Find the first gen that has a completed particle and evolve that
            particle to the next generation.

            3.) If we got to here, we know that we have satisfied the min# of
            particles for the swarm, and they are all currently running (probably at
            various generation indexes). Go onto the next swarm

        If we couldn't find a swarm to allocate a particle in, go onto the next
        sprint and start allocating particles there....

    Parameters:
    ----------------------------------------------------------------
    exhaustedSwarmId: If not None, force a change to the current set of active
                      swarms by marking this swarm as either 'completing' or
                      'completed'. If there are still models being evaluaed in
                      it, mark it as 'completing', else 'completed. This is
                      used in situations where we can't find any new unique
                      models to create in this swarm. In these situations, we
                      force an update to the hypersearch state so no other
                      worker wastes time try to use this swarm.

    retval: (exit, particle, swarm)
              exit: If true, this worker is ready to exit (particle and
                      swarm will be None)
              particle: Which particle to run
              swarm: which swarm the particle is in

              NOTE: When particle and swarm are None and exit is False, it
              means that we need to wait for one or more other worker(s) to
              finish their respective models before we can pick a particle
              to run. This will generally only happen when speculativeParticles
              is set to False.
    """
    # Cancel search?
    jobCancel = self._cjDAO.jobGetFields(self._jobID, ['cancel'])[0]
    if jobCancel:
        self._jobCancelled = True
        # Did a worker cancel the job because of an error?
        (workerCmpReason, workerCmpMsg) = self._cjDAO.jobGetFields(self._jobID,
            ['workerCompletionReason', 'workerCompletionMsg'])
        if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
            self.logger.info("Exiting due to job being cancelled")
            self._cjDAO.jobSetFields(self._jobID,
                  dict(workerCompletionMsg="Job was cancelled"),
                  useConnectionID=False, ignoreUnchanged=True)
        else:
            self.logger.error("Exiting because some worker set the "
                  "workerCompletionReason to %s. WorkerCompletionMsg: %s" %
                  (workerCmpReason, workerCmpMsg))
        return (True, None, None)

    # Perform periodic updates on the Hypersearch state.
    if self._hsState is not None:
        priorActiveSwarms = self._hsState.getActiveSwarms()
    else:
        priorActiveSwarms = None

    # Update the HypersearchState, checking for matured swarms, and marking
    # the passed in swarm as exhausted, if any
    self._hsStatePeriodicUpdate(exhaustedSwarmId=exhaustedSwarmId)

    # The above call may have modified self._hsState['activeSwarmIds']
    # Log the current set of active swarms
    activeSwarms = self._hsState.getActiveSwarms()
    if activeSwarms != priorActiveSwarms:
        self.logger.info("Active swarms changed to %s (from %s)" % (activeSwarms,
                                                                    priorActiveSwarms))
    self.logger.debug("Active swarms: %s" % (activeSwarms))

    # If too many model errors were detected, exit.
    # Only start checking once more than 5 models have completed, so a single
    # early failure doesn't abort the whole search.
    totalCmpModels = self._resultsDB.getNumCompletedModels()
    if totalCmpModels > 5:
        numErrs = self._resultsDB.getNumErrModels()
        if (float(numErrs) / totalCmpModels) > self._maxPctErrModels:
            # Get one of the errors
            errModelIds = self._resultsDB.getErrModelIds()
            resInfo = self._cjDAO.modelsGetResultAndStatus([errModelIds[0]])[0]
            modelErrMsg = resInfo.completionMsg
            cmpMsg = "%s: Exiting due to receiving too many models failing" \
                     " from exceptions (%d out of %d). \nModel Exception: %s" % \
                      (ErrorCodes.tooManyModelErrs, numErrs, totalCmpModels,
                       modelErrMsg)
            self.logger.error(cmpMsg)

            # Cancel the entire job now, if it has not already been cancelled
            workerCmpReason = self._cjDAO.jobGetFields(self._jobID,
                ['workerCompletionReason'])[0]
            if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
                self._cjDAO.jobSetFields(
                    self._jobID,
                    fields=dict(
                          cancel=True,
                          workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
                          workerCompletionMsg = cmpMsg),
                    useConnectionID=False,
                    ignoreUnchanged=True)
            return (True, None, None)

    # If HsState thinks the search is over, exit. It is seeing if the results
    # on the sprint we just completed are worse than a prior sprint.
    if self._hsState.isSearchOver():
        cmpMsg = "Exiting because results did not improve in most recently" \
                 " completed sprint."
        self.logger.info(cmpMsg)
        self._cjDAO.jobSetFields(self._jobID,
              dict(workerCompletionMsg=cmpMsg),
              useConnectionID=False, ignoreUnchanged=True)
        return (True, None, None)

    # Search successive active sprints, until we can find a candidate particle
    # to work with
    sprintIdx = -1
    while True:
        # Is this sprint active?
        sprintIdx += 1
        (active, eos) = self._hsState.isSprintActive(sprintIdx)

        # If no more sprints to explore:
        if eos:
            # If any prior ones are still being explored, finish up exploring them
            if self._hsState.anyGoodSprintsActive():
                self.logger.info("No more sprints to explore, waiting for prior"
                                 " sprints to complete")
                return (False, None, None)

            # Else, we're done
            else:
                cmpMsg = "Exiting because we've evaluated all possible field " \
                         "combinations"
                self._cjDAO.jobSetFields(self._jobID,
                                         dict(workerCompletionMsg=cmpMsg),
                                         useConnectionID=False, ignoreUnchanged=True)
                self.logger.info(cmpMsg)
                return (True, None, None)

        if not active:
            if not self._speculativeParticles:
                if not self._hsState.isSprintCompleted(sprintIdx):
                    self.logger.info("Waiting for all particles in sprint %d to complete"
                                     "before evolving any more particles" % (sprintIdx))
                    return (False, None, None)
            continue

        # ====================================================================
        # Look for swarms that have particle "holes" in their generations. That is,
        # an earlier generation with less than minParticlesPerSwarm. This can
        # happen if a model that was started eariler got orphaned. If we detect
        # this, start a new particle in that generation.
        swarmIds = self._hsState.getActiveSwarms(sprintIdx)
        for swarmId in swarmIds:
            firstNonFullGenIdx = self._resultsDB.firstNonFullGeneration(
                                    swarmId=swarmId,
                                    minNumParticles=self._minParticlesPerSwarm)
            if firstNonFullGenIdx is None:
                continue

            if firstNonFullGenIdx < self._resultsDB.highestGeneration(swarmId):
                self.logger.info("Cloning an earlier model in generation %d of swarm "
                                 "%s (sprintIdx=%s) to replace an orphaned model" % (
                                    firstNonFullGenIdx, swarmId, sprintIdx))

                # Clone a random orphaned particle from the incomplete generation
                (allParticles, allModelIds, errScores, completed, matured) = \
                    self._resultsDB.getOrphanParticleInfos(swarmId, firstNonFullGenIdx)

                if len(allModelIds) > 0:
                    # We have seen instances where we get stuck in a loop incessantly
                    # trying to clone earlier models (NUP-1511). My best guess is that
                    # we've already successfully cloned each of the orphaned models at
                    # least once, but still need at least one more. If we don't create
                    # a new particleID, we will never be able to instantiate another
                    # model (since particleID hash is a unique key in the models table).
                    # So, on 1/8/2013 this logic was changed to create a new particleID
                    # whenever we clone an orphan.
                    newParticleId = True
                    self.logger.info("Cloning an orphaned model")

                # If there is no orphan, clone one of the other particles. We can
                # have no orphan if this was a speculative generation that only
                # continued particles completed in the prior generation.
                else:
                    newParticleId = True
                    self.logger.info("No orphans found, so cloning a non-orphan")
                    (allParticles, allModelIds, errScores, completed, matured) = \
                        self._resultsDB.getParticleInfos(swarmId=swarmId,
                                                         genIdx=firstNonFullGenIdx)

                # Clone that model
                modelId = random.choice(allModelIds)
                self.logger.info("Cloning model %r" % (modelId))
                (particleState, _, _, _, _) = self._resultsDB.getParticleInfo(modelId)
                particle = Particle(hsObj = self,
                                    resultsDB = self._resultsDB,
                                    flattenedPermuteVars=self._flattenedPermutations,
                                    newFromClone=particleState,
                                    newParticleId=newParticleId)
                return (False, particle, swarmId)

        # ====================================================================
        # Sort the swarms in priority order, trying the ones with the least
        # number of models first
        # NOTE: Python 2 semantics — zip() returns a list, sorted in place below.
        swarmSizes = numpy.array([self._resultsDB.numModels(x) for x in swarmIds])
        swarmSizeAndIdList = zip(swarmSizes, swarmIds)
        swarmSizeAndIdList.sort()
        for (_, swarmId) in swarmSizeAndIdList:

            # -------------------------------------------------------------------
            # 1.) The particle will be created from new (at generation #0) if there
            # are not already self._minParticlesPerSwarm particles in the swarm.
            (allParticles, allModelIds, errScores, completed, matured) = (
                self._resultsDB.getParticleInfos(swarmId))
            if len(allParticles) < self._minParticlesPerSwarm:
                particle = Particle(hsObj=self,
                                    resultsDB=self._resultsDB,
                                    flattenedPermuteVars=self._flattenedPermutations,
                                    swarmId=swarmId,
                                    newFarFrom=allParticles)

                # Jam in the best encoder state found from the first sprint
                bestPriorModel = None
                if sprintIdx >= 1:
                    (bestPriorModel, errScore) = self._hsState.bestModelInSprint(0)

                if bestPriorModel is not None:
                    self.logger.info("Best model and errScore from previous sprint(%d):"
                                     " %s, %g" % (0, str(bestPriorModel), errScore))
                    (baseState, modelId, errScore, completed, matured) \
                        = self._resultsDB.getParticleInfo(bestPriorModel)
                    particle.copyEncoderStatesFrom(baseState)

                    # Copy the best inference type from the earlier sprint
                    particle.copyVarStatesFrom(baseState, ['modelParams|inferenceType'])

                    # It's best to jiggle the best settings from the prior sprint, so
                    # compute a new position starting from that previous best
                    # Only jiggle the vars we copied from the prior model
                    # (encoder vars contain ':' in their flattened names)
                    whichVars = []
                    for varName in baseState['varStates']:
                        if ':' in varName:
                            whichVars.append(varName)
                    particle.newPosition(whichVars)

                    self.logger.debug("Particle after incorporating encoder vars from best "
                                      "model in previous sprint: \n%s" % (str(particle)))

                return (False, particle, swarmId)

            # -------------------------------------------------------------------
            # 2.) Look for a completed particle to evolve
            # Note that we use lastDescendent. We only want to evolve particles that
            # are at their most recent generation index.
            (readyParticles, readyModelIds, readyErrScores, _, _) = (
                self._resultsDB.getParticleInfos(swarmId, genIdx=None,
                                                 matured=True, lastDescendent=True))

            # If we have at least 1 ready particle to evolve...
            if len(readyParticles) > 0:
                readyGenIdxs = [x['genIdx'] for x in readyParticles]
                sortedGenIdxs = sorted(set(readyGenIdxs))
                genIdx = sortedGenIdxs[0]

                # Now, genIdx has the generation of the particle we want to run,
                # Get a particle from that generation and evolve it.
                useParticle = None
                for particle in readyParticles:
                    if particle['genIdx'] == genIdx:
                        useParticle = particle
                        break

                # If speculativeParticles is off, we don't want to evolve a particle
                # into the next generation until all particles in the current
                # generation have completed.
                if not self._speculativeParticles:
                    (particles, _, _, _, _) = self._resultsDB.getParticleInfos(
                        swarmId, genIdx=genIdx, matured=False)
                    if len(particles) > 0:
                        continue

                particle = Particle(hsObj=self,
                                    resultsDB=self._resultsDB,
                                    flattenedPermuteVars=self._flattenedPermutations,
                                    evolveFromState=useParticle)
                return (False, particle, swarmId)

            # END: for (swarmSize, swarmId) in swarmSizeAndIdList:
            # No success in this swarm, onto next swarm

        # ====================================================================
        # We couldn't find a particle in this sprint ready to evolve. If
        # speculative particles is OFF, we have to wait for one or more other
        # workers to finish up their particles before we can do anything.
        if not self._speculativeParticles:
            self.logger.info("Waiting for one or more of the %s swarms "
                             "to complete a generation before evolving any more particles" \
                             % (str(swarmIds)))
            return (False, None, None)

        # END: while True:
        # No success in this sprint, into next sprint
  def _okToExit(self):
    """Test if it's OK to exit this worker. This is only called when we run
    out of prospective new models to evaluate. This method sees if all models
    have matured yet. If not, it will sleep for a bit and return False. This
    will indicate to the hypersearch worker that we should keep running, and
    check again later. This gives this worker a chance to pick up and adopt any
    model which may become orphaned by another worker before it matures.
    If all models have matured, this method will send a STOP message to all
    matured, running models (presummably, there will be just one - the model
    which thinks it's the best) before returning True.

    Returns: False if the worker should keep running (some models are still
             immature); True once every model has matured, any still-running
             models have been told to stop, and the job's field-contribution
             results have been published.
    """
    # Send an update status periodically to the JobTracker so that it doesn't
    # think this worker is dead.
    print >> sys.stderr, "reporter:status:In hypersearchV2: _okToExit"
    # Any immature models still running?
    if not self._jobCancelled:
      (_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(matured=False)
      if len(modelIds) > 0:
        # NOTE(review): "hyperseach" typo is in the original log message;
        # left untouched here since this is a documentation-only pass.
        self.logger.info("Ready to end hyperseach, but not all models have " \
                    "matured yet. Sleeping a bit to wait for all models " \
                    "to mature.")
        # Sleep for a bit, no need to check for orphaned models very often
        time.sleep(5.0 * random.random())
        return False
    # All particles have matured, send a STOP signal to any that are still
    # running.
    (_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(completed=False)
    for modelId in modelIds:
      self.logger.info("Stopping model %d because the search has ended" \
                          % (modelId))
      # ignoreUnchanged avoids an error if another worker already stopped it.
      self._cjDAO.modelSetFields(modelId,
                      dict(engStop=ClientJobsDAO.STOP_REASON_STOPPED),
                      ignoreUnchanged = True)
    # Update the HsState to get the accurate field contributions.
    self._hsStatePeriodicUpdate()
    pctFieldContributions, absFieldContributions = \
                                      self._hsState.getFieldContributions()
    # Update the results field with the new field contributions.
    jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
    if jobResultsStr is not None:
      jobResults = json.loads(jobResultsStr)
    else:
      jobResults = {}
    # Update the fieldContributions field.
    if pctFieldContributions != jobResults.get('fieldContributions', None):
      jobResults['fieldContributions'] = pctFieldContributions
      jobResults['absoluteFieldContributions'] = absFieldContributions
      # Compare-and-swap write: if another worker changed 'results' since we
      # read it, our update is dropped and the other worker's value wins.
      isUpdated = self._cjDAO.jobSetFieldIfEqual(self._jobID,
                                                 fieldName='results',
                                                 curValue=jobResultsStr,
                                                 newValue=json.dumps(jobResults))
      if isUpdated:
        self.logger.info('Successfully updated the field contributions:%s',
                                                      pctFieldContributions)
      else:
        self.logger.info('Failed updating the field contributions, ' \
                         'another hypersearch worker must have updated it')
    return True
def killSwarmParticles(self, swarmID):
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=swarmID, completed=False)
for modelId in modelIds:
self.logger.info("Killing the following models in swarm '%s' because"
"the swarm is being terminated: %s" % (swarmID,
str(modelIds)))
self._cjDAO.modelSetFields(
modelId, dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),
ignoreUnchanged=True)
  def createModels(self, numModels=1):
    """Create one or more new models for evaluation. These should NOT be models
    that we already know are in progress (i.e. those that have been sent to us
    via recordModelProgress). We return a list of models to the caller
    (HypersearchWorker) and if one can be successfully inserted into
    the models table (i.e. it is not a duplicate) then HypersearchWorker will
    turn around and call our runModel() method, passing in this model. If it
    is a duplicate, HypersearchWorker will call this method again. A model
    is a duplicate if either the modelParamsHash or particleHash is
    identical to another entry in the model table.

    The numModels is provided by HypersearchWorker as a suggestion as to how
    many models to generate. This particular implementation only ever returns 1
    model.

    Before choosing some new models, we first do a sweep for any models that
    may have been abandonded by failed workers. If/when we detect an abandoned
    model, we mark it as complete and orphaned and hide it from any subsequent
    queries to our ResultsDB. This effectively considers it as if it never
    existed. We also change the paramsHash and particleHash in the model record
    of the models table so that we can create another model with the same
    params and particle status and run it (which we then do immediately).

    The modelParamsHash returned for each model should be a hash (max allowed
    size of ClientJobsDAO.hashMaxSize) that uniquely identifies this model by
    it's params and the optional particleHash should be a hash of the particleId
    and generation index. Every model that gets placed into the models database,
    either by this worker or another worker, will have these hashes computed for
    it. The recordModelProgress gets called for every model in the database and
    the hash is used to tell which, if any, are the same as the ones this worker
    generated.

    NOTE: We check first ourselves for possible duplicates using the paramsHash
    before we return a model. If HypersearchWorker failed to insert it (because
    some other worker beat us to it), it will turn around and call our
    recordModelProgress with that other model so that we now know about it. It
    will then call createModels() again.

    This methods returns an exit boolean and the model to evaluate. If there is
    no model to evalulate, we may return False for exit because we want to stay
    alive for a while, waiting for all other models to finish. This gives us
    a chance to detect and pick up any possibly orphaned model by another
    worker.

    Parameters:
    ----------------------------------------------------------------------
    numModels:   number of models to generate
    retval:      (exit, models)
                    exit: true if this worker should exit.
                    models: list of tuples, one for each model. Each tuple contains:
                      (modelParams, modelParamsHash, particleHash)

                 modelParams is a dictionary containing the following elements:
                   structuredParams: dictionary containing all variables for
                     this model, with encoders represented as a dict within
                     this dict (or None if they are not included.

                   particleState: dictionary containing the state of this
                     particle. This includes the position and velocity of
                     each of it's variables, the particleId, and the particle
                     generation index. It contains the following keys:

                     id: The particle Id of the particle we are using to
                           generate/track this model. This is a string of the
                           form <hypesearchWorkerId>.<particleIdx>
                     genIdx: the particle's generation index. This starts at 0
                           and increments every time we move the particle to a
                           new position.
                     swarmId: The swarmId, which is a string of the form
                       <encoder>.<encoder>... that describes this swarm
                     varStates: dict of the variable states. The key is the
                         variable name, the value is a dict of the variable's
                         position, velocity, bestPosition, bestResult, etc.
    """
    # Check for and mark orphaned models
    self._checkForOrphanedModels()
    modelResults = []
    for _ in xrange(numModels):
      # NOTE(review): candidateParticle is reset every iteration, so the
      # "if candidateParticle is None" test below is always true on entry.
      candidateParticle = None
      # If we've reached the max # of model to evaluate, we're done.
      if (self._maxModels is not None and
          (self._resultsDB.numModels() - self._resultsDB.getNumErrModels()) >=
          self._maxModels):
        return (self._okToExit(), [])
      # If we don't already have a particle to work on, get a candidate swarm and
      # particle to work with. If None is returned for the particle it means
      # either that the search is over (if exitNow is also True) or that we need
      # to wait for other workers to finish up their models before we can pick
      # another particle to run (if exitNow is False).
      if candidateParticle is None:
        (exitNow, candidateParticle, candidateSwarm) = (
            self._getCandidateParticleAndSwarm())
      if candidateParticle is None:
        if exitNow:
          return (self._okToExit(), [])
        else:
          # Send an update status periodically to the JobTracker so that it doesn't
          # think this worker is dead.
          print >> sys.stderr, "reporter:status:In hypersearchV2: speculativeWait"
          # Randomized wait spreads workers out so they don't all poll at once.
          time.sleep(self._speculativeWaitSecondsMax * random.random())
          return (False, [])
      useEncoders = candidateSwarm.split('.')
      numAttempts = 0
      # Loop until we can create a unique model that we haven't seen yet.
      while True:
        # If this is the Nth attempt with the same candidate, agitate it a bit
        # to find a new unique position for it.
        if numAttempts >= 1:
          self.logger.debug("Agitating particle to get unique position after %d "
                  "failed attempts in a row" % (numAttempts))
          candidateParticle.agitate()
        # Create the hierarchical params expected by the base description. Note
        # that this is where we incorporate encoders that have no permuted
        # values in them.
        position = candidateParticle.getPosition()
        structuredParams = dict()
        def _buildStructuredParams(value, keys):
          # Visitor passed to dictutils.rCopy: maps each leaf of the
          # permutations dict to its concrete value for this model.
          flatKey = _flattenKeys(keys)
          # If it's an encoder, either put in None if it's not used, or replace
          # all permuted constructor params with the actual position.
          if flatKey in self._encoderNames:
            if flatKey in useEncoders:
              # Form encoder dict, substituting in chosen permutation values.
              return value.getDict(flatKey, position)
            # Encoder not used.
            else:
              return None
          # Regular top-level variable.
          elif flatKey in position:
            return position[flatKey]
          # Fixed override of a parameter in the base description.
          else:
            return value
        structuredParams = dictutils.rCopy(self._permutations,
                                           _buildStructuredParams,
                                           discardNoneKeys=False)
        # Create the modelParams.
        modelParams = dict(
                   structuredParams=structuredParams,
                   particleState = candidateParticle.getState()
                   )
        # And the hashes.
        m = hashlib.md5()
        m.update(sortedJSONDumpS(structuredParams))
        m.update(self._baseDescriptionHash)
        paramsHash = m.digest()
        particleInst = "%s.%s" % (modelParams['particleState']['id'],
                                  modelParams['particleState']['genIdx'])
        particleHash = hashlib.md5(particleInst).digest()
        # Increase attempt counter
        numAttempts += 1
        # If this is a new one, and passes the filter test, exit with it.
        # TODO: There is currently a problem with this filters implementation as
        # it relates to self._maxUniqueModelAttempts. When there is a filter in
        # effect, we should try a lot more times before we decide we have
        # exhausted the parameter space for this swarm. The question is, how many
        # more times?
        if self._filterFunc and not self._filterFunc(structuredParams):
          valid = False
        else:
          valid = True
        if valid and self._resultsDB.getModelIDFromParamsHash(paramsHash) is None:
          break
        # If we've exceeded the max allowed number of attempts, mark this swarm
        # as completing or completed, so we don't try and allocate any more new
        # particles to it, and pick another.
        if numAttempts >= self._maxUniqueModelAttempts:
          (exitNow, candidateParticle, candidateSwarm) \
                = self._getCandidateParticleAndSwarm(
                                              exhaustedSwarmId=candidateSwarm)
          if candidateParticle is None:
            if exitNow:
              return (self._okToExit(), [])
            else:
              time.sleep(self._speculativeWaitSecondsMax * random.random())
              return (False, [])
          # Start over with the replacement candidate from the new swarm.
          numAttempts = 0
          useEncoders = candidateSwarm.split('.')
      # Log message
      if self.logger.getEffectiveLevel() <= logging.DEBUG:
        self.logger.debug("Submitting new potential model to HypersearchWorker: \n%s"
                          % (pprint.pformat(modelParams, indent=4)))
      modelResults.append((modelParams, paramsHash, particleHash))
    return (False, modelResults)
def recordModelProgress(self, modelID, modelParams, modelParamsHash, results,
completed, completionReason, matured, numRecords):
"""Record or update the results for a model. This is called by the
HSW whenever it gets results info for another model, or updated results
on a model that is still running.
The first time this is called for a given modelID, the modelParams will
contain the params dict for that model and the modelParamsHash will contain
the hash of the params. Subsequent updates of the same modelID will
have params and paramsHash values of None (in order to save overhead).
The Hypersearch object should save these results into it's own working
memory into some table, which it then uses to determine what kind of
new models to create next time createModels() is called.
Parameters:
----------------------------------------------------------------------
modelID: ID of this model in models table
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for a
description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
results: tuple containing (allMetrics, optimizeMetric). Each is a
dict containing metricName:result pairs. .
May be none if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured. In most cases, once a
model matures, it will complete as well. The only time a
model matures and does not complete is if it's currently
the best model and we choose to keep it running to generate
predictions.
numRecords: Number of records that have been processed so far by this
model.
"""
if results is None:
metricResult = None
else:
metricResult = results[1].values()[0]
# Update our database.
errScore = self._resultsDB.update(modelID=modelID,
modelParams=modelParams,modelParamsHash=modelParamsHash,
metricResult=metricResult, completed=completed,
completionReason=completionReason, matured=matured,
numRecords=numRecords)
# Log message.
self.logger.debug('Received progress on model %d: completed: %s, '
'cmpReason: %s, numRecords: %d, errScore: %s' ,
modelID, completed, completionReason, numRecords, errScore)
# Log best so far.
(bestModelID, bestResult) = self._resultsDB.bestModelIdAndErrScore()
self.logger.debug('Best err score seen so far: %s on model %s' % \
(bestResult, bestModelID))
def runModel(self, modelID, jobID, modelParams, modelParamsHash,
jobsDAO, modelCheckpointGUID):
"""Run the given model.
This runs the model described by 'modelParams'. Periodically, it updates
the results seen on the model to the model database using the databaseAO
(database Access Object) methods.
Parameters:
-------------------------------------------------------------------------
modelID: ID of this model in models table
jobID: ID for this hypersearch job in the jobs table
modelParams: parameters of this specific model
modelParams is a dictionary containing the name/value
pairs of each variable we are permuting over. Note that
variables within an encoder spec have their name
structure as:
<encoderName>.<encodrVarName>
modelParamsHash: hash of modelParamValues
jobsDAO jobs data access object - the interface to the jobs
database where model information is stored
modelCheckpointGUID: A persistent, globally-unique identifier for
constructing the model checkpoint key
"""
# We're going to make an assumption that if we're not using streams, that
# we also don't need checkpoints saved. For now, this assumption is OK
# (if there are no streams, we're typically running on a single machine
# and just save models to files) but we may want to break this out as
# a separate controllable parameter in the future
if not self._createCheckpoints:
modelCheckpointGUID = None
# Register this model in our database
self._resultsDB.update(modelID=modelID,
modelParams=modelParams,
modelParamsHash=modelParamsHash,
metricResult = None,
completed = False,
completionReason = None,
matured = False,
numRecords = 0)
# Get the structured params, which we pass to the base description
structuredParams = modelParams['structuredParams']
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.debug("Running Model. \nmodelParams: %s, \nmodelID=%s, " % \
(pprint.pformat(modelParams, indent=4), modelID))
# Record time.clock() so that we can report on cpu time
cpuTimeStart = time.clock()
# Run the experiment. This will report the results back to the models
# database for us as well.
logLevel = self.logger.getEffectiveLevel()
try:
if self._dummyModel is None or self._dummyModel is False:
(cmpReason, cmpMsg) = runModelGivenBaseAndParams(
modelID=modelID,
jobID=jobID,
baseDescription=self._baseDescription,
params=structuredParams,
predictedField=self._predictedField,
reportKeys=self._reportKeys,
optimizeKey=self._optimizeKey,
jobsDAO=jobsDAO,
modelCheckpointGUID=modelCheckpointGUID,
logLevel=logLevel,
predictionCacheMaxRecords=self._predictionCacheMaxRecords)
else:
dummyParams = dict(self._dummyModel)
dummyParams['permutationParams'] = structuredParams
if self._dummyModelParamsFunc is not None:
permInfo = dict(structuredParams)
permInfo ['generation'] = modelParams['particleState']['genIdx']
dummyParams.update(self._dummyModelParamsFunc(permInfo))
(cmpReason, cmpMsg) = runDummyModel(
modelID=modelID,
jobID=jobID,
params=dummyParams,
predictedField=self._predictedField,
reportKeys=self._reportKeys,
optimizeKey=self._optimizeKey,
jobsDAO=jobsDAO,
modelCheckpointGUID=modelCheckpointGUID,
logLevel=logLevel,
predictionCacheMaxRecords=self._predictionCacheMaxRecords)
# Write out the completion reason and message
jobsDAO.modelSetCompleted(modelID,
completionReason = cmpReason,
completionMsg = cmpMsg,
cpuTime = time.clock() - cpuTimeStart)
except InvalidConnectionException, e:
self.logger.warn("%s", e)
| Petr-Kovalev/nupic-win32 | py/nupic/swarming/HypersearchV2.py | Python | gpl-3.0 | 169,598 |
from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
# Expected OpenAPI document for docs_src.request_files.tutorial001: two
# multipart upload endpoints (/files/ takes bytes, /uploadfile/ takes an
# UploadFile) plus the standard validation-error component schemas. Compared
# verbatim against GET /openapi.json in test_openapi_schema below.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {"title": "FastAPI", "version": "0.1.0"},
    "paths": {
        "/files/": {
            "post": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
                "summary": "Create File",
                "operationId": "create_file_files__post",
                "requestBody": {
                    "content": {
                        "multipart/form-data": {
                            "schema": {
                                "$ref": "#/components/schemas/Body_create_file_files__post"
                            }
                        }
                    },
                    "required": True,
                },
            }
        },
        "/uploadfile/": {
            "post": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
                "summary": "Create Upload File",
                "operationId": "create_upload_file_uploadfile__post",
                "requestBody": {
                    "content": {
                        "multipart/form-data": {
                            "schema": {
                                "$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
                            }
                        }
                    },
                    "required": True,
                },
            }
        },
    },
    "components": {
        "schemas": {
            "Body_create_upload_file_uploadfile__post": {
                "title": "Body_create_upload_file_uploadfile__post",
                "required": ["file"],
                "type": "object",
                "properties": {
                    "file": {"title": "File", "type": "string", "format": "binary"}
                },
            },
            "Body_create_file_files__post": {
                "title": "Body_create_file_files__post",
                "required": ["file"],
                "type": "object",
                "properties": {
                    "file": {"title": "File", "type": "string", "format": "binary"}
                },
            },
            "ValidationError": {
                "title": "ValidationError",
                "required": ["loc", "msg", "type"],
                "type": "object",
                "properties": {
                    "loc": {
                        "title": "Location",
                        "type": "array",
                        "items": {"type": "string"},
                    },
                    "msg": {"title": "Message", "type": "string"},
                    "type": {"title": "Error Type", "type": "string"},
                },
            },
            "HTTPValidationError": {
                "title": "HTTPValidationError",
                "type": "object",
                "properties": {
                    "detail": {
                        "title": "Detail",
                        "type": "array",
                        "items": {"$ref": "#/components/schemas/ValidationError"},
                    }
                },
            },
        }
    },
}
def test_openapi_schema():
    """The generated OpenAPI document must match the expected schema exactly."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema
# Expected 422 response body when the required "file" form field is missing.
file_required = {
    "detail": [
        {
            "loc": ["body", "file"],
            "msg": "field required",
            "type": "value_error.missing",
        }
    ]
}
def test_post_form_no_body():
    """Posting without any multipart body is rejected with a 422."""
    resp = client.post("/files/")
    assert resp.status_code == 422, resp.text
    assert resp.json() == file_required
def test_post_body_json():
    """A JSON body is not a valid substitute for the multipart file field."""
    resp = client.post("/files/", json={"file": "Foo"})
    assert resp.status_code == 422, resp.text
    assert resp.json() == file_required
def test_post_file(tmp_path):
    """Uploading bytes to /files/ reports the uploaded byte count back."""
    source = tmp_path / "test.txt"
    source.write_bytes(b"<file content>")
    local_client = TestClient(app)
    with source.open("rb") as upload:
        resp = local_client.post("/files/", files={"file": upload})
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"file_size": 14}
def test_post_large_file(tmp_path):
    """Files larger than pydantic's default spool size still upload fully."""
    default_pydantic_max_size = 2 ** 16
    source = tmp_path / "test.txt"
    source.write_bytes(b"x" * (default_pydantic_max_size + 1))
    local_client = TestClient(app)
    with source.open("rb") as upload:
        resp = local_client.post("/files/", files={"file": upload})
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"file_size": default_pydantic_max_size + 1}
def test_post_upload_file(tmp_path):
    """Uploading to /uploadfile/ echoes back the original filename."""
    source = tmp_path / "test.txt"
    source.write_bytes(b"<file content>")
    local_client = TestClient(app)
    with source.open("rb") as upload:
        resp = local_client.post("/uploadfile/", files={"file": upload})
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"filename": "test.txt"}
| tiangolo/fastapi | tests/test_tutorial/test_request_files/test_tutorial001.py | Python | mit | 6,215 |
import os
import uuid
import zipfile
def addCFilepath(filename, fType = "HTML"):
    """Map a local file path to its archive-relative path inside the EPUB.

    Parameters:
        filename: path to the source file on disk; only its basename is kept.
        fType:    "HTML" for text content (stored under Text/) or "IMG" for
                  images (stored under Images/).

    Returns the OEBPS-relative path, e.g. "Text/ch1.html".

    Raises ValueError for an unknown fType. (BUG FIX: the original if/elif
    silently returned None for any other value, which would crash later and
    far away when the path was used.)
    """
    folders = {"HTML": "Text", "IMG": "Images"}
    try:
        folder = folders[fType]
    except KeyError:
        raise ValueError("unknown file type: %r" % (fType,))
    return "%s/%s" % (folder, os.path.basename(filename))
class Chapter:
    """Bookkeeping record for one HTML chapter added to the EPUB.

    Attributes (all initially empty strings):
        id        -- manifest id of the chapter (e.g. "ch1")
        src       -- archive-relative path under OEBPS (e.g. "Text/ch1.html")
        directory -- original path of the source file on disk
        html      -- raw HTML payload placeholder (not used when zipping;
                     the file on disk is read instead)
    """
    def __init__(self):
        self.id = ""
        self.src = ""
        self.directory = ""
        self.html = ""
class TocItem:
    """One entry in the EPUB table of contents (a navPoint in toc.ncx)."""
    def __init__(self, playOrder):
        # Zero-based reading-order index of this entry.
        self.play = playOrder
        self.title = ""
        self.src = ""
        self.depth = 0
        self.id = ""
class imgItem:
    """Bookkeeping record for one image file bundled into the EPUB."""
    def __init__(self):
        self.src = ""
        self.id = ""
        self.directory = ""
        # Media type written into the manifest for this image.
        # NOTE(review): "images/jpeg" is the original author's value (a
        # standard EPUB would use "image/jpeg") — preserved unchanged here.
        self.type = "images/jpeg"
class Epub:
    """Builder for a minimal EPUB 2 archive.

    Usage: set metadata via the add* methods, register content with
    addCover()/addHTML()/addIMG(), then call createEpub() to write
    "<title>.epub" into the current working directory. createContent() and
    createToc() generate the content.opf / toc.ncx documents into
    self.contOpf / self.tocNcx.

    Fixed in this revision: the OPF <spine> previously listed the cover as an
    <item> element instead of <itemref>, producing an invalid package
    document (spine children must be itemref per the OPF 2.0 spec).
    """
    def __init__(self):
        self.author = None
        self.meta = []              # reserved, currently unused
        self.creationDate = None
        self.publisher = None
        self.language = "US/EN"
        self.rights = None
        self.title = "Default"      # also used as the output filename stem
        self.img = []               # imgItem records for non-cover images
        self.html = []              # Chapter records, in reading order
        self.cover = None           # imgItem for the cover, or None
        self.coverXHTML = None      # XHTML wrapper page for the cover image
        self.toc = []               # TocItem records, in reading order
        self.spine = []             # reserved, currently unused
        self.contOpf = None         # generated content.opf text
        self.tocNcx = None          # generated toc.ncx text
    def addAuthor(self, author):
        """Set the dc:creator metadata value."""
        self.author = author
    def addTitle(self, title):
        """Set the book title (also names the .epub output file)."""
        self.title = title
    def addCreationDate(self,cDate):
        """Set the creation date (currently not emitted into the OPF)."""
        self.creationDate = cDate
    def addPublisher(self, pub):
        """Set the dc:publisher metadata value."""
        self.publisher = pub
    def addLanguage(self, lan):
        """Set the dc:language metadata value."""
        self.language = lan
    def addRights(self, rights):
        """Set the dc:rights metadata value."""
        self.rights = rights
    def addCover(self, cover):
        """Register *cover* (a JPEG path on disk) as the cover image.

        NOTE(review): the generated pages always reference the fixed archive
        names Images/Cover.jpg and Text/Cover.xhtml regardless of the source
        filename; createEpub() stores the file under that fixed name.
        """
        img = imgItem()
        img.src = addCFilepath(cover, 'IMG')
        img.id = os.path.basename(cover)
        img.directory = cover
        self.cover = img
        # Simple XHTML wrapper page that displays the cover image full-width.
        xhtml = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Cover</title>
<style type="text/css"> img { max-width: 100%; } </style>
</head>
<body>
<img src="../Images/Cover.jpg"/>
</body>
</html>"""
        self.coverXHTML = xhtml
    def addHTML(self, filepath, title = "Chapter"):
        """Append an HTML chapter file and a matching TOC entry."""
        item = Chapter()
        # Ids only need to be unique within the manifest, so ch1, ch2, ...
        item.id = "ch"+str(len(self.html)+1)
        # src paths in both content.opf and toc.ncx are OEBPS-relative.
        item.src = addCFilepath(filepath)
        item.directory = filepath
        # The raw HTML is never read back when zipping (the file on disk is
        # used), so keep an empty placeholder. (Replaced the original's
        # profane dummy string, which nothing consumed.)
        item.html = ""
        self.html.append(item)
        toc = TocItem(len(self.toc))
        toc.title = title
        toc.src = addCFilepath(filepath)
        # Same "chN" id as the Chapter appended above (len(self.html) grew).
        toc.id = "ch"+str(len(self.html))
        self.toc.append(toc)
    def addIMG(self, filepath):
        """Register a non-cover image for inclusion under Images/."""
        img = imgItem()
        img.src = addCFilepath(filepath, 'IMG')
        img.id = os.path.basename(filepath)
        img.directory = filepath
        self.img.append(img)
    def createToc(self):
        """Generate the NCX table of contents into self.tocNcx."""
        head = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE ncx PUBLIC "-//NISO//DTD ncx 2005-1//EN" "http://www.daisy.org/z3986/2005/ncx-2005-1.dtd">
<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1">
<head>
<meta name="dtb:uid" content="urn:uuid:{}"/>
<meta name="dtb:depth" content="1"/>
<meta name="dtb:totalPageCount" content="0"/>
<meta name="dtb:maxPageNumber" content="0"/>
</head>
<docTitle>
<text>{}</text>
</docTitle>
<navMap>""".format(str(uuid.uuid4()), self.title)
        for tocEntry in self.toc:
            # playOrder is 1-based in NCX, while tocEntry.play is 0-based.
            head += """\n<navPoint id="navPoint-{}" playOrder="{}">
<navLabel>
<text>{}</text>
</navLabel>
<content src="{}"/>
</navPoint>""".format(tocEntry.play, str(tocEntry.play+1), tocEntry.title, tocEntry.src)
        head += """\n</navMap>
</ncx>"""
        self.tocNcx = head
    def createContent(self):
        """Generate the OPF package document into self.contOpf.

        Emits the metadata fields that were set, a manifest entry for every
        registered file, the reading-order spine, and a guide entry for the
        cover page.
        """
        head = """<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="BookId" version="2.0">
  <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">
    <dc:identifier id="BookId" opf:scheme="UUID">urn:uuid:{}</dc:identifier>""".format(str(uuid.uuid4()))
        if self.title:
            head += '\n    <dc:title>{}</dc:title>'.format(self.title)
        if self.author:
            head += '\n    <dc:creator opf:file-as="{}" opf:role="aut">{}</dc:creator>'.format(self.author, self.author)
        if self.publisher:
            head += '\n    <dc:publisher>{}</dc:publisher>'.format(self.publisher)
        if self.language:
            head += '\n    <dc:language>{}</dc:language>'.format(self.language)
        if self.rights:
            head += '\n    <dc:rights>{}</dc:rights>'.format(self.rights)
        # NOTE(review): this cover meta entry (and the spine/guide cover
        # references below) are written even when no cover was registered,
        # matching the original behaviour.
        head += '\n    <meta content="Cover.jpg" name="cover"/>'
        head += """\n  </metadata>
  <manifest>
    <item href="toc.ncx" id="ncx" media-type="application/x-dtbncx+xml"/>"""
        if self.cover:
            head += '\n    <item href="Text/Cover.xhtml" id="Cover.xhtml" media-type="application/xhtml+xml"/>'
        for htmlEntry in self.html:
            head += '\n    <item href="{}" id="{}" media-type="application/xhtml+xml"/>'.format(htmlEntry.src, htmlEntry.id)
        if self.cover:
            head += '\n    <item href="Images/Cover.jpg" id="Cover.jpg" media-type="image/jpeg"/>'
        for imgEntry in self.img:
            head += '\n    <item href="{}" id="{}" media-type="image/jpeg"/>'.format(imgEntry.src, imgEntry.id)
        # BUG FIX: spine children must be <itemref> elements; the original
        # emitted a bare <item idref=.../> for the cover, which is invalid
        # OPF (and inconsistent with the loop right below).
        head += """\n  </manifest>
  <spine toc="ncx">
    <itemref idref="Cover.xhtml"/>"""
        for tocEntry in self.toc:
            head += '\n    <itemref idref="{}"/>'.format(tocEntry.id)
        head += """\n  </spine>
  <guide>
    <reference href="Text/Cover.xhtml" title="Cover" type="cover"/>
  </guide>
</package>"""
        self.contOpf = head
    def printhead(self):
        # Debug helper left over from development.
        print("HEAD")
    def createEpub(self):
        """Assemble "<title>.epub" in the current working directory.

        Expects the files 'mimetype' and 'container.xml' to exist in the
        current directory (an IOError/OSError is raised otherwise). The
        mimetype file is written first, uncompressed (ZipFile defaults to
        ZIP_STORED), as the OCF container spec requires.
        """
        zipF = zipfile.ZipFile(self.title+".epub", 'w')
        zipF.write('mimetype')
        zipF.write('container.xml', 'META-INF/container.xml')
        if self.cover:
            zipF.write(self.cover.directory, 'OEBPS/Images/Cover.jpg')
            zipF.writestr('OEBPS/Text/Cover.xhtml', self.coverXHTML)
        for htmlEntry in self.html:
            zipF.write(htmlEntry.directory, 'OEBPS/'+htmlEntry.src)
        for imgEntry in self.img:
            zipF.write(imgEntry.directory, 'OEBPS/'+imgEntry.src)
        self.createContent()
        self.createToc()
        zipF.writestr('OEBPS/content.opf', self.contOpf)
        zipF.writestr('OEBPS/toc.ncx', self.tocNcx)
        zipF.close()
| flyingjam/epubmaker | epub.py | Python | mit | 6,149 |
from django.conf.urls.defaults import *
from django.conf import settings
from django.views.generic.simple import direct_to_template
from django.contrib import admin
admin.autodiscover()
from account.openid_consumer import PinaxConsumer
from menu.models import Menu_Item
# With open signup enabled, use the stock pinax account signup view;
# otherwise registration goes through the invite-code flow instead.
if settings.ACCOUNT_OPEN_SIGNUP:
    signup_view = "account.views.signup"
else:
    signup_view = "signup_codes.views.signup"
# URL routing table (old-style Django `patterns` syntax). The homepage is a
# plain template render that also injects all menu items into the context;
# everything else delegates to the apps' own urlconfs.
urlpatterns = patterns('',
    url(r'^$', direct_to_template, {
        "template": "homepage.html",
        "extra_context": {
            "menu_items": Menu_Item.objects.all(),
        }
    }, name="home"),
    url(r'^admin/invite_user/$', 'signup_codes.views.admin_invite_user', name="admin_invite_user"),
    # signup_view is chosen above based on ACCOUNT_OPEN_SIGNUP.
    url(r'^account/signup/$', signup_view, name="acct_signup"),
    (r'^about/', include('about.urls')),
    (r'^account/', include('account.urls')),
    (r'^openid/(.*)', PinaxConsumer()),
    (r'^profiles/', include('delegate.urls')),
    (r'^notices/', include('notification.urls')),
    (r'^comments/', include('threadedcomments.urls')),
    (r'^announcements/', include('announcements.urls')),
    (r'^blog/', include('blog_wrapper.urls')),
    (r'^talk/', include('talk.urls')),
    (r'^statistics/', include('statistics.urls')),
    (r'^schedule/', include('schedule.urls')),
    (r'^admin/(.*)', admin.site.root),
)
# In development (SERVE_MEDIA on) let Django serve /site_media/ itself;
# in production the front-end web server is expected to handle it.
if settings.SERVE_MEDIA:
    urlpatterns += patterns('',
        (r'^site_media/', include('staticfiles.urls')),
    )
| theju/confista | urls.py | Python | mit | 1,479 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding field 'Volume.project'
        # Nullable, so existing rows need no default backfill.
        db.add_column('db_volume', 'project',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True),
                      keep_default=False)
def backwards(self, orm):
# Deleting field 'Volume.project'
db.delete_column('db_volume', 'project')
models = {
'db.backend': {
'Meta': {'ordering': "['clustername']", 'object_name': 'Backend'},
'clustername': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'ctotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'dfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'disk_templates': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'hypervisor': ('django.db.models.fields.CharField', [], {'default': "'kvm'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True'}),
'mfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'mtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'pinst_cnt': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'db.backendnetwork': {
'Meta': {'unique_together': "(('network', 'backend'),)", 'object_name': 'BackendNetwork'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'networks'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Backend']"}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'backend_networks'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Network']"}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'db.bridgepooltable': {
'Meta': {'object_name': 'BridgePoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.flavor': {
'Meta': {'unique_together': "(('cpu', 'ram', 'disk', 'volume_type'),)", 'object_name': 'Flavor'},
'allow_create': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cpu': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disk': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ram': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'volume_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flavors'", 'on_delete': 'models.PROTECT', 'to': "orm['db.VolumeType']"})
},
'db.ipaddress': {
'Meta': {'unique_together': "(('network', 'address', 'deleted'),)", 'object_name': 'IPAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'floating_ip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipversion': ('django.db.models.fields.IntegerField', [], {}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Network']"}),
'nic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.NetworkInterface']"}),
'project': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'subnet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Subnet']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'db.ipaddresslog': {
'Meta': {'object_name': 'IPAddressLog'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'allocated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network_id': ('django.db.models.fields.IntegerField', [], {}),
'released_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'server_id': ('django.db.models.fields.IntegerField', [], {})
},
'db.ippooltable': {
'Meta': {'object_name': 'IPPoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'subnet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ip_pools'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['db.Subnet']"})
},
'db.macprefixpooltable': {
'Meta': {'object_name': 'MacPrefixPoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.network': {
'Meta': {'object_name': 'Network'},
'action': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'external_router': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flavor': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'floating_ip_pool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'machines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['db.VirtualMachine']", 'through': "orm['db.NetworkInterface']", 'symmetrical': 'False'}),
'mode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'network'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '32'}),
'subnet_ids': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'})
},
'db.networkinterface': {
'Meta': {'object_name': 'NetworkInterface'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'device_owner': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'firewall_profile': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['db.VirtualMachine']"}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Network']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'security_groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['db.SecurityGroup']", 'null': 'True', 'symmetrical': 'False'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'ACTIVE'", 'max_length': '32'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'db.quotaholderserial': {
'Meta': {'ordering': "['serial']", 'object_name': 'QuotaHolderSerial'},
'accept': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'resolved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'serial': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True', 'db_index': 'True'})
},
'db.securitygroup': {
'Meta': {'object_name': 'SecurityGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'db.subnet': {
'Meta': {'object_name': 'Subnet'},
'cidr': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'dhcp': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dns_nameservers': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'gateway': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'host_routes': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipversion': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subnets'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Network']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'})
},
'db.virtualmachine': {
'Meta': {'object_name': 'VirtualMachine'},
'action': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machines'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['db.Backend']"}),
'backend_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'buildpercentage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'flavor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['db.Flavor']", 'on_delete': 'models.PROTECT'}),
'hostid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'BUILD'", 'max_length': '30'}),
'project': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'suspended': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'task_job_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'})
},
'db.virtualmachinediagnostic': {
'Meta': {'ordering': "['-created']", 'object_name': 'VirtualMachineDiagnostic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'diagnostics'", 'to': "orm['db.VirtualMachine']"}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'source_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'db.virtualmachinemetadata': {
'Meta': {'unique_together': "(('meta_key', 'vm'),)", 'object_name': 'VirtualMachineMetadata'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta_key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'meta_value': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'vm': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['db.VirtualMachine']"})
},
'db.volume': {
'Meta': {'object_name': 'Volume'},
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'delete_on_termination': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'volumes'", 'null': 'True', 'to': "orm['db.VirtualMachine']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'snapshot_counter': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'CREATING'", 'max_length': '64'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'volume_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'volumes'", 'on_delete': 'models.PROTECT', 'to': "orm['db.VolumeType']"})
},
'db.volumemetadata': {
'Meta': {'unique_together': "(('volume', 'key'),)", 'object_name': 'VolumeMetadata'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'volume': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['db.Volume']"})
},
'db.volumetype': {
'Meta': {'object_name': 'VolumeType'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disk_template': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['db'] | grnet/synnefo | snf-cyclades-app/synnefo/db/migrations/old/0110_auto__add_field_volume_project.py | Python | gpl-3.0 | 23,262 |
#!/usr/bin/python
# Look in ~/.boto for EC2Connection info (keys, region, etc.)
import boto
from boto import ec2
# Connect to EC2; credentials, region etc. come from ~/.boto (see header).
console = boto.ec2.EC2Connection()

# Only snapshots owned by this account, not public/shared ones.
snapshots = console.get_all_snapshots(owner='self')

# Print a human-readable summary of each snapshot.
for snapshot in snapshots:
    print snapshot
    print 'ID: %s' % snapshot.id  # The unique ID of the snapshot.
    print 'Start Time: %s' % snapshot.start_time  # When the snapshot was initiated.
    print 'Description: %s' % snapshot.description
    print 'Tags: %s' % snapshot.tags
    print 'Volume ID %s' % snapshot.volume_id  # Source volume the snapshot was taken from.
    print 'Volume Size: %s' % snapshot.volume_size
    print '------------------------'
| managedkaos/aws-scripts | get_snapshot_info.py | Python | mit | 607 |
class Stacker:
    '''Allow stacking element creation with "with" statements

    eg.
    s = Stacker(self.rdoc.body)
    with s.stack('div', cls='panel'):
        s.stack('div', text='Hello', cls='panel-heading')
        with s.stack('div', cls='panel-body'):
            s.stack(p="this is text inside body")
            s.stack('button', cls='btn btn-primary')
        s.stack('div', text="panel footer here", cls="panel-footer")
    '''

    def __init__(self, element, prev_stacker=None):
        # proxy everything to element - copy __dict__ and __class__.
        # A new class is synthesised per instance so this object both keeps
        # the Stacker API (first base) and behaves like the wrapped element
        # (second base). Aliasing element.__dict__ means every attribute set
        # below (_stack, _element) is actually stored on the element itself.
        self.__class__ = type(element.__class__.__name__,
                              (self.__class__, element.__class__),
                              {})
        self.__dict__ = element.__dict__
        if prev_stacker:
            # Share one stack with the parent stacker so nesting composes.
            self._stack = prev_stacker._stack
        else:
            self._stack = [element]
        self._element = element

    def stack(self, *args, **kwargs):
        '''Create an element - parent is head of stack'''
        parent = self._stack[-1]
        e = parent.element(*args, **kwargs)
        # Wrap the new element in the same (dynamic) stacker class, sharing
        # this stacker's stack, so it can be used as a context manager too.
        se = self.__class__(e, self)
        return se

    def __enter__(self, **kwargs):
        # Entering the "with" block makes this element the current parent.
        self._stack.append(self._element)
        return self

    def __exit__(self, type, value, traceback):
        # Restore the previous parent; exceptions are not suppressed.
        self._stack.pop()
class StackerWrapper:
    """Base class that forwards element creation to a wrapped Stacker.

    Subclasses build convenience APIs on top of :meth:`stack`; the wrapped
    ``Stacker`` (exposed as ``self.stacker``) tracks the element nesting.
    """

    def __init__(self, stacker):
        # Keep the underlying Stacker accessible to subclasses and callers.
        self.stacker = stacker

    def stack(self, *args, **kwargs):
        """Create an element through the wrapped Stacker and return it."""
        create = self.stacker.stack
        return create(*args, **kwargs)
class HtmlShortcuts(StackerWrapper):
'''Wrapper for Stacker that provides Html entities shortcuts
eg.
h = HtmlShortcuts(Stacker(self.rdoc.body))
with h.div(cls='panel'):
h.div(text='hello', cls='panel-heading')
with h.div(cls='panel-body'):
h.p('this is text inside body')
h.button(cls='btn btn-primary')
h.div(text='panel footer here', cls=panel-footer')
'''
###############################################################################
# "Shortcut" methods for element types

# Sections (HTML5 sectioning and heading content). Each shortcut simply
# delegates to self.stack() with the matching tag name as ``typ``.

def section(self, *args, **kwargs):
    '''
    The section element represents a generic section of a document or
    application. A section, in this context, is a thematic grouping of content,
    typically with a heading.
    '''
    return self.stack(typ="section", *args, **kwargs)

def nav(self, *args, **kwargs):
    '''
    The nav element represents a section of a page that links to other pages or
    to parts within the page: a section with navigation links.
    '''
    return self.stack(typ="nav", *args, **kwargs)

def article(self, *args, **kwargs):
    '''
    The article element represents a self-contained composition in a document,
    page, application, or site and that is, in principle, independently
    distributable or reusable, e.g. in syndication. This could be a forum post, a
    magazine or newspaper article, a blog entry, a user-submitted comment, an
    interactive widget or gadget, or any other independent item of content.
    '''
    return self.stack(typ="article", *args, **kwargs)

def aside(self, *args, **kwargs):
    '''
    The aside element represents a section of a page that consists of content
    that is tangentially related to the content around the aside element, and
    which could be considered separate from that content. Such sections are
    often represented as sidebars in printed typography.
    '''
    return self.stack(typ="aside", *args, **kwargs)

def h1(self, *args, **kwargs):
    '''
    Represents the highest ranking heading.
    '''
    return self.stack(typ="h1", *args, **kwargs)

def h2(self, *args, **kwargs):
    '''
    Represents the second-highest ranking heading.
    '''
    return self.stack(typ="h2", *args, **kwargs)

def h3(self, *args, **kwargs):
    '''
    Represents the third-highest ranking heading.
    '''
    return self.stack(typ="h3", *args, **kwargs)

def h4(self, *args, **kwargs):
    '''
    Represents the fourth-highest ranking heading.
    '''
    return self.stack(typ="h4", *args, **kwargs)

def h5(self, *args, **kwargs):
    '''
    Represents the fifth-highest ranking heading.
    '''
    return self.stack(typ="h5", *args, **kwargs)

def h6(self, *args, **kwargs):
    '''
    Represents the sixth-highest ranking heading.
    '''
    return self.stack(typ="h6", *args, **kwargs)

def hgroup(self, *args, **kwargs):
    '''
    The hgroup element represents the heading of a section. The element is used
    to group a set of h1-h6 elements when the heading has multiple levels, such
    as subheadings, alternative titles, or taglines.
    '''
    return self.stack(typ="hgroup", *args, **kwargs)

def header(self, *args, **kwargs):
    '''
    The header element represents a group of introductory or navigational aids.
    '''
    return self.stack(typ="header", *args, **kwargs)

def footer(self, *args, **kwargs):
    '''
    The footer element represents a footer for its nearest ancestor sectioning
    content or sectioning root element. A footer typically contains information
    about its section such as who wrote it, links to related documents,
    copyright data, and the like.
    '''
    return self.stack(typ="footer", *args, **kwargs)

def address(self, *args, **kwargs):
    '''
    The address element represents the contact information for its nearest
    article or body element ancestor. If that is the body element, then the
    contact information applies to the document as a whole.
    '''
    return self.stack(typ="address", *args, **kwargs)
# Grouping content

def p(self, *args, **kwargs):
    '''
    The p element represents a paragraph.
    '''
    return self.stack(typ="p", *args, **kwargs)

def hr(self, *args, **kwargs):
    '''
    The hr element represents a paragraph-level thematic break, e.g. a scene
    change in a story, or a transition to another topic within a section of a
    reference book.
    '''
    # hr is a void element; mark it so no closing tag is generated.
    kwargs["is_single"] = True  # TODO
    return self.stack(typ="hr", *args, **kwargs)

def pre(self, *args, **kwargs):
    '''
    The pre element represents a block of preformatted text, in which structure
    is represented by typographic conventions rather than by elements.
    '''
    # Disable pretty-printing so the preformatted whitespace is preserved.
    kwargs["is_pretty"] = False
    return self.stack(typ="pre", *args, **kwargs)

def blockquote(self, *args, **kwargs):
    '''
    The blockquote element represents a section that is quoted from another
    source.
    '''
    return self.stack(typ="blockquote", *args, **kwargs)

def ol(self, *args, **kwargs):
    '''
    The ol element represents a list of items, where the items have been
    intentionally ordered, such that changing the order would change the
    meaning of the document.
    '''
    return self.stack(typ="ol", *args, **kwargs)

def ul(self, *args, **kwargs):
    '''
    The ul element represents a list of items, where the order of the items is
    not important - that is, where changing the order would not materially change
    the meaning of the document.
    '''
    return self.stack(typ="ul", *args, **kwargs)

def li(self, *args, **kwargs):
    '''
    The li element represents a list item. If its parent element is an ol, ul, or
    menu element, then the element is an item of the parent element's list, as
    defined for those elements. Otherwise, the list item has no defined
    list-related relationship to any other li element.
    '''
    return self.stack(typ="li", *args, **kwargs)

def dl(self, *args, **kwargs):
    '''
    The dl element represents an association list consisting of zero or more
    name-value groups (a description list). Each group must consist of one or
    more names (dt elements) followed by one or more values (dd elements).
    Within a single dl element, there should not be more than one dt element for
    each name.
    '''
    return self.stack(typ="dl", *args, **kwargs)

def dt(self, *args, **kwargs):
    '''
    The dt element represents the term, or name, part of a term-description group
    in a description list (dl element).
    '''
    return self.stack(typ="dt", *args, **kwargs)

def dd(self, *args, **kwargs):
    '''
    The dd element represents the description, definition, or value, part of a
    term-description group in a description list (dl element).
    '''
    return self.stack(typ="dd", *args, **kwargs)

def figure(self, *args, **kwargs):
    '''
    The figure element represents some flow content, optionally with a caption,
    that is self-contained and is typically referenced as a single unit from the
    main flow of the document.
    '''
    return self.stack(typ="figure", *args, **kwargs)

def figcaption(self, *args, **kwargs):
    '''
    The figcaption element represents a caption or legend for the rest of the
    contents of the figcaption element's parent figure element, if any.
    '''
    return self.stack(typ="figcaption", *args, **kwargs)

def div(self, *args, **kwargs):
    '''
    The div element has no special meaning at all. It represents its children. It
    can be used with the class, lang, and title attributes to mark up semantics
    common to a group of consecutive elements.
    '''
    return self.stack(typ="div", *args, **kwargs)
# Text semantics

def a(self, *args, **kwargs):
    '''
    If the a element has an href attribute, then it represents a hyperlink (a
    hypertext anchor).
    If the a element has no href attribute, then the element represents a
    placeholder for where a link might otherwise have been placed, if it had been
    relevant.
    '''
    return self.stack(typ="a", *args, **kwargs)

def em(self, *args, **kwargs):
    '''
    The em element represents stress emphasis of its contents.
    '''
    return self.stack(typ="em", *args, **kwargs)

def strong(self, *args, **kwargs):
    '''
    The strong element represents strong importance for its contents.
    '''
    return self.stack(typ="strong", *args, **kwargs)

def small(self, *args, **kwargs):
    '''
    The small element represents side comments such as small print.
    '''
    return self.stack(typ="small", *args, **kwargs)

def s(self, *args, **kwargs):
    '''
    The s element represents contents that are no longer accurate or no longer
    relevant.
    '''
    return self.stack(typ="s", *args, **kwargs)

def cite(self, *args, **kwargs):
    '''
    The cite element represents the title of a work (e.g. a book, a paper, an
    essay, a poem, a score, a song, a script, a film, a TV show, a game, a
    sculpture, a painting, a theatre production, a play, an opera, a musical, an
    exhibition, a legal case report, etc). This can be a work that is being
    quoted or referenced in detail (i.e. a citation), or it can just be a work
    that is mentioned in passing.
    '''
    return self.stack(typ="cite", *args, **kwargs)

def q(self, *args, **kwargs):
    '''
    The q element represents some phrasing content quoted from another source.
    '''
    return self.stack(typ="q", *args, **kwargs)

def dfn(self, *args, **kwargs):
    '''
    The dfn element represents the defining instance of a term. The paragraph,
    description list group, or section that is the nearest ancestor of the dfn
    element must also contain the definition(s) for the term given by the dfn
    element.
    '''
    return self.stack(typ="dfn", *args, **kwargs)

def abbr(self, *args, **kwargs):
    '''
    The abbr element represents an abbreviation or acronym, optionally with its
    expansion. The title attribute may be used to provide an expansion of the
    abbreviation. The attribute, if specified, must contain an expansion of the
    abbreviation, and nothing else.
    '''
    return self.stack(typ="abbr", *args, **kwargs)
def time_(self, *args, **kwargs):
    '''
    The time element represents either a time on a 24 hour clock, or a precise
    date in the proleptic Gregorian calendar, optionally with a time and a
    time-zone offset.
    '''
    # The trailing underscore in the method name only avoids clashing with the
    # stdlib ``time`` module in user code; the HTML tag itself is "time".
    # Passing typ="time_" (as before) would have produced a bogus <time_> tag,
    # since every other shortcut passes the exact tag name as ``typ``.
    return self.stack(typ="time", *args, **kwargs)
# Backwards-compatible alias.
_time = time_
def code(self, *args, **kwargs):
    '''
    The code element represents a fragment of computer code. This could be an XML
    element name, a filename, a computer program, or any other string that a
    computer would recognize.
    '''
    return self.stack(typ="code", *args, **kwargs)

def var(self, *args, **kwargs):
    '''
    The var element represents a variable. This could be an actual variable in a
    mathematical expression or programming context, an identifier representing a
    constant, a function parameter, or just be a term used as a placeholder in
    prose.
    '''
    return self.stack(typ="var", *args, **kwargs)

def samp(self, *args, **kwargs):
    '''
    The samp element represents (sample) output from a program or computing
    system.
    '''
    return self.stack(typ="samp", *args, **kwargs)

def kbd(self, *args, **kwargs):
    '''
    The kbd element represents user input (typically keyboard input, although it
    may also be used to represent other input, such as voice commands).
    '''
    return self.stack(typ="kbd", *args, **kwargs)

def sub(self, *args, **kwargs):
    '''
    The sub element represents a subscript.
    '''
    return self.stack(typ="sub", *args, **kwargs)

def sup(self, *args, **kwargs):
    '''
    The sup element represents a superscript.
    '''
    return self.stack(typ="sup", *args, **kwargs)

def i(self, *args, **kwargs):
    '''
    The i element represents a span of text in an alternate voice or mood, or
    otherwise offset from the normal prose in a manner indicating a different
    quality of text, such as a taxonomic designation, a technical term, an
    idiomatic phrase from another language, a thought, or a ship name in Western
    texts.
    '''
    return self.stack(typ="i", *args, **kwargs)

def b(self, *args, **kwargs):
    '''
    The b element represents a span of text to which attention is being drawn for
    utilitarian purposes without conveying any extra importance and with no
    implication of an alternate voice or mood, such as key words in a document
    abstract, product names in a review, actionable words in interactive
    text-driven software, or an article lede.
    '''
    return self.stack(typ="b", *args, **kwargs)

def u(self, *args, **kwargs):
    '''
    The u element represents a span of text with an unarticulated, though
    explicitly rendered, non-textual annotation, such as labeling the text as
    being a proper name in Chinese text (a Chinese proper name mark), or
    labeling the text as being misspelt.
    '''
    return self.stack(typ="u", *args, **kwargs)
def mark(self, *args, **kwargs):
'''
The mark element represents a run of text in one document marked or
highlighted for reference purposes, due to its relevance in another context.
When used in a quotation or other block of text referred to from the prose,
it indicates a highlight that was not originally present but which has been
added to bring the reader's attention to a part of the text that might not
have been considered important by the original author when the block was
originally written, but which is now under previously unexpected scrutiny.
When used in the main prose of a document, it indicates a part of the
document that has been highlighted due to its likely relevance to the user's
current activity.
'''
return self.stack(typ="mark", *args, **kwargs)
def ruby(self, *args, **kwargs):
'''
The ruby element allows one or more spans of phrasing content to be marked
with ruby annotations. Ruby annotations are short runs of text presented
alongside base text, primarily used in East Asian typography as a guide for
pronunciation or to include other annotations. In Japanese, this form of
typography is also known as furigana.
'''
return self.stack(typ="ruby", *args, **kwargs)
def rt(self, *args, **kwargs):
'''
The rt element marks the ruby text component of a ruby annotation.
'''
return self.stack(typ="rt", *args, **kwargs)
def rp(self, *args, **kwargs):
'''
The rp element can be used to provide parentheses around a ruby text
component of a ruby annotation, to be shown by user agents that don't support
ruby annotations.
'''
return self.stack(typ="rp", *args, **kwargs)
def bdi(self, *args, **kwargs):
'''
The bdi element represents a span of text that is to be isolated from its
surroundings for the purposes of bidirectional text formatting.
'''
return self.stack(typ="bdi", *args, **kwargs)
def bdo(self, *args, **kwargs):
'''
The bdo element represents explicit text directionality formatting control
for its children. It allows authors to override the Unicode bidirectional
algorithm by explicitly specifying a direction override.
'''
return self.stack(typ="bdo", *args, **kwargs)
def span(self, *args, **kwargs):
'''
The span element doesn't mean anything on its own, but can be useful when
used together with the global attributes, e.g. class, lang, or dir. It
represents its children.
'''
return self.stack(typ="span", *args, **kwargs)
def br(self, *args, **kwargs):
'''
The br element represents a line break.
'''
kwargs["is_single"] = True # TODO
return self.stack(typ="br", *args, **kwargs)
def wbr(self, *args, **kwargs):
'''
The wbr element represents a line break opportunity.
'''
kwargs["is_single"] = True # TODO
return self.stack(typ="wbr", *args, **kwargs)
# Edits
def ins(self, *args, **kwargs):
'''
The ins element represents an addition to the document.
'''
return self.stack(typ="ins", *args, **kwargs)
def del_(self, *args, **kwargs):
'''
The del element represents a removal from the document.
'''
return self.stack(typ="del_", *args, **kwargs)
# Embedded content
def img(self, *args, **kwargs):
'''
An img element represents an image.
'''
kwargs["is_single"] = True # TODO
return self.stack(typ="img", *args, **kwargs)
def iframe(self, *args, **kwargs):
'''
The iframe element represents a nested browsing context.
'''
return self.stack(typ="iframe", *args, **kwargs)
def embed(self, *args, **kwargs):
'''
The embed element represents an integration point for an external (typically
non-HTML) application or interactive content.
'''
kwargs["is_single"] = True # TODO
return self.stack(typ="embed", *args, **kwargs)
def object_(self, *args, **kwargs):
'''
The object element can represent an external resource, which, depending on
the type of the resource, will either be treated as an image, as a nested
browsing context, or as an external resource to be processed by a plugin.
'''
return self.stack(typ="object_", *args, **kwargs)
_object = object_
def param(self, *args, **kwargs):
'''
The param element defines parameters for plugins invoked by object elements.
It does not represent anything on its own.
'''
kwargs["is_single"] = True # TODO
return self.stack(typ="param", *args, **kwargs)
def video(self, *args, **kwargs):
'''
A video element is used for playing videos or movies, and audio files with
captions.
'''
return self.stack(typ="video", *args, **kwargs)
def audio(self, *args, **kwargs):
'''
An audio element represents a sound or audio stream.
'''
return self.stack(typ="audio", *args, **kwargs)
def source(self, *args, **kwargs):
'''
The source element allows authors to specify multiple alternative media
resources for media elements. It does not represent anything on its own.
'''
kwargs["is_single"] = True # TODO
return self.stack(typ="source", *args, **kwargs)
def track(self, *args, **kwargs):
'''
The track element allows authors to specify explicit external timed text
tracks for media elements. It does not represent anything on its own.
'''
kwargs["is_single"] = True # TODO
return self.stack(typ="track", *args, **kwargs)
def canvas(self, *args, **kwargs):
'''
The canvas element provides scripts with a resolution-dependent bitmap
canvas, which can be used for rendering graphs, game graphics, or other
visual images on the fly.
'''
return self.stack(typ="canvas", *args, **kwargs)
def map_(self, *args, **kwargs):
'''
The map element, in conjunction with any area element descendants, defines an
image map. The element represents its children.
'''
return self.stack(typ="map_", *args, **kwargs)
def area(self, *args, **kwargs):
'''
The area element represents either a hyperlink with some text and a
corresponding area on an image map, or a dead area on an image map.
'''
kwargs["is_single"] = True # TODO
return self.stack(typ="area", *args, **kwargs)
# Tabular data
def table(self, *args, **kwargs):
'''
The table element represents data with more than one dimension, in the form
of a table.
'''
return self.stack(typ="table", *args, **kwargs)
def caption(self, *args, **kwargs):
'''
The caption element represents the title of the table that is its parent, if
it has a parent and that is a table element.
'''
return self.stack(typ="caption", *args, **kwargs)
def colgroup(self, *args, **kwargs):
'''
The colgroup element represents a group of one or more columns in the table
that is its parent, if it has a parent and that is a table element.
'''
return self.stack(typ="colgroup", *args, **kwargs)
def col(self, *args, **kwargs):
'''
If a col element has a parent and that is a colgroup element that itself has
a parent that is a table element, then the col element represents one or more
columns in the column group represented by that colgroup.
'''
kwargs["is_single"] = True # TODO
return self.stack(typ="col", *args, **kwargs)
def tbody(self, *args, **kwargs):
'''
The tbody element represents a block of rows that consist of a body of data
for the parent table element, if the tbody element has a parent and it is a
table.
'''
return self.stack(typ="tbody", *args, **kwargs)
def thead(self, *args, **kwargs):
'''
The thead element represents the block of rows that consist of the column
labels (headers) for the parent table element, if the thead element has a
parent and it is a table.
'''
return self.stack(typ="thead", *args, **kwargs)
def tfoot(self, *args, **kwargs):
'''
The tfoot element represents the block of rows that consist of the column
summaries (footers) for the parent table element, if the tfoot element has a
parent and it is a table.
'''
return self.stack(typ="tfoot", *args, **kwargs)
def tr(self, *args, **kwargs):
'''
The tr element represents a row of cells in a table.
'''
return self.stack(typ="tr", *args, **kwargs)
def td(self, *args, **kwargs):
'''
The td element represents a data cell in a table.
'''
return self.stack(typ="td", *args, **kwargs)
def th(self, *args, **kwargs):
'''
The th element represents a header cell in a table.
'''
return self.stack(typ="th", *args, **kwargs)
# Forms
def form(self, *args, **kwargs):
'''
The form element represents a collection of form-associated elements, some of
which can represent editable values that can be submitted to a server for
processing.
'''
return self.stack(typ="form", *args, **kwargs)
def fieldset(self, *args, **kwargs):
'''
The fieldset element represents a set of form controls optionally grouped
under a common name.
'''
return self.stack(typ="fieldset", *args, **kwargs)
def legend(self, *args, **kwargs):
'''
The legend element represents a caption for the rest of the contents of the
legend element's parent fieldset element, if any.
'''
return self.stack(typ="legend", *args, **kwargs)
def label(self, *args, **kwargs):
'''
The label represents a caption in a user interface. The caption can be
associated with a specific form control, known as the label element's labeled
control, either using for attribute, or by putting the form control inside
the label element itself.
'''
return self.stack(typ="label", *args, **kwargs)
def input_(self, *args, **kwargs):
'''
The input element represents a typed data field, usually with a form control
to allow the user to edit the data.
'''
kwargs["is_single"] = True # TODO
return self.stack(typ="input", *args, **kwargs)
input = _input = input_
def button(self, *args, **kwargs):
'''
The button element represents a button. If the element is not disabled, then
the user agent should allow the user to activate the button.
'''
return self.stack(typ="button", *args, **kwargs)
def select(self, *args, **kwargs):
'''
The select element represents a control for selecting amongst a set of
options.
'''
return self.stack(typ="select", *args, **kwargs)
def datalist(self, *args, **kwargs):
'''
The datalist element represents a set of option elements that represent
predefined options for other controls. The contents of the element represents
fallback content for legacy user agents, intermixed with option elements that
represent the predefined options. In the rendering, the datalist element
represents nothing and it, along with its children, should be hidden.
'''
return self.stack(typ="datalist", *args, **kwargs)
def optgroup(self, *args, **kwargs):
'''
The optgroup element represents a group of option elements with a common
label.
'''
return self.stack(typ="optgroup", *args, **kwargs)
def option(self, *args, **kwargs):
'''
The option element represents an option in a select element or as part of a
list of suggestions in a datalist element.
'''
return self.stack(typ="option", *args, **kwargs)
def textarea(self, *args, **kwargs):
'''
The textarea element represents a multiline plain text edit control for the
element's raw value. The contents of the control represent the control's
default value.
'''
return self.stack(typ="textarea", *args, **kwargs)
def keygen(self, *args, **kwargs):
'''
The keygen element represents a key pair generator control. When the
control's form is submitted, the private key is stored in the local keystore,
and the public key is packaged and sent to the server.
'''
kwargs["is_single"] = True # TODO
return self.stack(typ="keygen", *args, **kwargs)
def output(self, *args, **kwargs):
'''
The output element represents the result of a calculation.
'''
return self.stack(typ="output", *args, **kwargs)
def progress(self, *args, **kwargs):
'''
The progress element represents the completion progress of a task. The
progress is either indeterminate, indicating that progress is being made but
that it is not clear how much more work remains to be done before the task is
complete (e.g. because the task is waiting for a remote host to respond), or
the progress is a number in the range zero to a maximum, giving the fraction
of work that has so far been completed.
'''
return self.stack(typ="progress", *args, **kwargs)
def meter(self, *args, **kwargs):
'''
The meter element represents a scalar measurement within a known range, or a
fractional value; for example disk usage, the relevance of a query result, or
the fraction of a voting population to have selected a particular candidate.
'''
return self.stack(typ="meter", *args, **kwargs)
# Interactive elements
def details(self, *args, **kwargs):
'''
The details element represents a disclosure widget from which the user can
obtain additional information or controls.
'''
return self.stack(typ="details", *args, **kwargs)
def summary(self, *args, **kwargs):
'''
The summary element represents a summary, caption, or legend for the rest of
the contents of the summary element's parent details element, if any.
'''
return self.stack(typ="summary", *args, **kwargs)
def command(self, *args, **kwargs):
'''
The command element represents a command that the user can invoke.
'''
kwargs["is_single"] = True # TODO
return self.stack(typ="command", *args, **kwargs)
def menu(self, *args, **kwargs):
'''
The menu element represents a list of commands.
'''
return self.stack(typ="menu", *args, **kwargs)
| skariel/webalchemy | webalchemy/Stacker.py | Python | mit | 31,131 |
# -*- coding: utf-8 -*-
#########################################################################
# #
# Copyright (C) 2015 Agile Business Group #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public Licensefor more details. #
# #
# You should have received a copy of the #
# GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#########################################################################
{
'name': 'Sale Order Lot Selection',
'version': '8.0.1.0.0',
'category': 'Sales Management',
'author': "Odoo Community Association (OCA), Agile Business Group",
'website': 'http://www.agilebg.com',
'license': 'AGPL-3',
'depends': ['sale', 'sale_stock', 'procurement', 'stock'],
'data': ['view/sale_view.xml'],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Eficent/sale-workflow | sale_order_lot_selection/__openerp__.py | Python | agpl-3.0 | 1,890 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Zuza Software Foundation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# Pootle is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pootle; if not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.template import loader, RequestContext
from django.views.decorators.http import require_POST
from pootle.core.decorators import permission_required
from pootle_misc.util import ajax_required, jsonify
from .decorators import get_goal, require_goal
from .forms import GoalForm
@require_POST
@ajax_required
@get_goal
@require_goal
@permission_required('administrate')
def ajax_edit_goal(request, goal):
"""Edit a goal through a form using AJAX."""
form = GoalForm(request.POST, instance=goal)
response = {}
rcode = 400
if form.is_valid():
form.save()
rcode = 200
if goal.description:
response["description"] = goal.description
else:
response["description"] = (u'<p class="placeholder muted">%s</p>' %
_(u"No description yet."))
context = {
'form': form,
'form_action': reverse('pootle-tagging-ajax-edit-goal',
args=[goal.slug]),
}
t = loader.get_template('admin/general_settings_form.html')
c = RequestContext(request, context)
response['form'] = t.render(c)
return HttpResponse(jsonify(response), status=rcode,
mimetype="application/json")
| arky/pootle-dev | pootle/apps/pootle_tagging/views.py | Python | gpl-2.0 | 2,088 |
# Copyright 2007 by Tiago Antao. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
PopGen: Population Genetics and Genomics library in Python
"""
| Ambuj-UF/ConCat-1.0 | src/Utils/Bio/PopGen/__init__.py | Python | gpl-2.0 | 293 |
import unittest
from TestBase import BaseClass
class Group:
def __init__(self, group_name, group_header, group_footer):
self.group_name = group_name
self.group_header = group_header
self.group_footer = group_footer
class GroupTestBase(BaseClass):
def create_group(self, wd, Group):
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys(Group.group_name)
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys(Group.group_header)
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys(Group.group_footer)
wd.find_element_by_name("submit").click()
def delete_group(self):
self.wd.find_element_by_link_text("groups").click()
self.wd.find_element_by_css_selector("span.group").click()
if not self.wd.find_element_by_name("selected[]").is_selected():
self.wd.find_element_by_name("selected[]").click()
self.wd.find_element_by_xpath("//div[@id='content']/form/input[5]").click()
def click_group_page(self, wd):
wd.find_element_by_css_selector("div.msgbox").click()
wd.find_element_by_link_text("group page").click() | werbk/task---1.3 | tests_group/group_lib.py | Python | apache-2.0 | 1,510 |
from tornado.escape import url_escape, json_encode
from tornado.web import HTTPError
from moi import r_client
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_core.qiita_settings import qiita_config
from qiita_core.util import send_email, execute_as_transaction
from qiita_core.exceptions import (IncorrectPasswordError, IncorrectEmailError,
UnverifiedEmailError)
from qiita_db.user import User
from qiita_db.exceptions import QiitaDBUnknownIDError, QiitaDBDuplicateError
# login code modified from https://gist.github.com/guillaumevincent/4771570
class AuthCreateHandler(BaseHandler):
"""User Creation"""
def get(self):
try:
error_message = self.get_argument("error")
# Tornado can raise an Exception directly, not a defined type
except:
error_message = ""
self.render("create_user.html", error=error_message)
@execute_as_transaction
def post(self):
username = self.get_argument("email", "").strip().lower()
password = self.get_argument("newpass", "")
info = {}
for info_column in ("name", "affiliation", "address", "phone"):
hold = self.get_argument(info_column, None)
if hold:
info[info_column] = hold
created = False
try:
created = User.create(username, password, info)
except QiitaDBDuplicateError:
msg = "Email already registered as a user"
if created:
info = created.info
try:
# qiita_config.base_url doesn't have a / at the end, but the
# qiita_config.portal_dir has it at the beginning but not at
# the end. This constructs the correct URL
url = qiita_config.base_url + qiita_config.portal_dir
send_email(username, "QIITA: Verify Email Address", "Please "
"click the following link to verify email address: "
"%s/auth/verify/%s?email=%s"
% (url, info['user_verify_code'],
url_escape(username)))
except:
msg = ("Unable to send verification email. Please contact the "
"qiita developers at <a href='mailto:qiita-help"
"@gmail.com'>[email protected]</a>")
self.redirect(u"%s/?level=danger&message=%s"
% (qiita_config.portal_dir, url_escape(msg)))
return
self.redirect(u"%s/" % qiita_config.portal_dir)
else:
error_msg = u"?error=" + url_escape(msg)
self.redirect(u"%s/auth/create/%s"
% (qiita_config.portal_dir, error_msg))
class AuthVerifyHandler(BaseHandler):
def get(self, code):
email = self.get_argument("email").strip().lower()
if User.verify_code(email, code, "create"):
msg = "Successfully verified user! You are now free to log in."
color = "black"
r_client.zadd('qiita-usernames', email, 0)
else:
msg = "Code not valid!"
color = "red"
self.render("user_verified.html", msg=msg, color=color,
email=self.get_argument("email").strip())
class AuthLoginHandler(BaseHandler):
"""user login, no page necessary"""
def get(self):
self.redirect("%s/" % qiita_config.portal_dir)
@execute_as_transaction
def post(self):
if r_client.get('maintenance') is not None:
raise HTTPError(503, "Site is down for maintenance")
username = self.get_argument("username", "").strip().lower()
passwd = self.get_argument("password", "")
nextpage = self.get_argument("next", None)
if nextpage is None:
if "auth/" not in self.request.headers['Referer']:
nextpage = self.request.headers['Referer']
else:
nextpage = "%s/" % qiita_config.portal_dir
msg = ""
# check the user level
try:
if User(username).level == "unverified":
# email not verified so dont log in
msg = ("Email not verified. Please check your email and click "
"the verify link. You may need to check your spam "
"folder to find the email.<br/>If a verification email"
" has not arrived in 15 minutes, please email <a href='"
"mailto:[email protected]'>[email protected]</a>")
except QiitaDBUnknownIDError:
msg = "Unknown user"
except RuntimeError:
# means DB not available, so set maintenance mode and failover
r_client.set("maintenance", "Database connection unavailable, "
"please try again later.")
self.redirect("%s/" % qiita_config.portal_dir)
return
# Check the login information
login = None
try:
login = User.login(username, passwd)
except IncorrectEmailError:
msg = "Unknown user"
except IncorrectPasswordError:
msg = "Incorrect password"
except UnverifiedEmailError:
msg = "You have not verified your email address"
if login:
# everything good so log in
self.set_current_user(username)
self.redirect(nextpage)
else:
self.render("index.html", message=msg, level='danger')
def set_current_user(self, user):
if user:
self.set_secure_cookie("user", json_encode(user))
else:
self.clear_cookie("user")
class AuthLogoutHandler(BaseHandler):
"""Logout handler, no page necessary"""
def get(self):
self.clear_cookie("user")
self.redirect("%s/" % qiita_config.portal_dir)
| squirrelo/qiita | qiita_pet/handlers/auth_handlers.py | Python | bsd-3-clause | 5,962 |
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import res_company
import res_company_ldap
import res_users
import auth_ldap_config_settings | chienlieu2017/it_management | odoo/addons/auth_ldap/models/__init__.py | Python | gpl-3.0 | 168 |
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example sets custom field values on a line item.
To determine which custom fields exist, run get_all_custom_fields.py.
To determine which line item exist, run get_all_line_items.py.
To create custom field options, run create_custom_field_options.py
Tags: CustomFieldService.getCustomField
Tags: LineItemService.getLineItem
"""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Set the ID of the custom fields, custom field option, and line item.
CUSTOM_FIELD_ID = 'INSERT_STRING_CUSTOM_FIELD_ID_HERE'
DROP_DOWN_CUSTOM_FIELD_ID = 'INSERT_DROP_DOWN_CUSTOM_FIELD_ID_HERE'
CUSTOM_FIELD_OPTION_ID = 'INSERT_CUSTOM_FIELD_OPTION_ID_HERE'
LINE_ITEM_ID = 'INSERT_LINE_ITEM_ID_HERE'
def main(client, custom_field_id, drop_down_custom_field_id,
custom_field_option_id, line_item_id):
# Initialize appropriate services.
custom_field_service = client.GetService(
'CustomFieldService', version='v201204')
line_item_service = client.GetService('LineItemService', version='v201204')
# Get custom field.
custom_field = custom_field_service.GetCustomField(custom_field_id)[0]
# Get drop-down custom field.
drop_down_custom_field = custom_field_service.GetCustomField(
drop_down_custom_field_id)[0]
# Get line item.
line_item = line_item_service.GetLineItem(line_item_id)[0]
if custom_field and line_item:
# Create custom field values.
custom_field_value = {
'customFieldId': custom_field['id'],
'type': 'CustomFieldValue',
'value': {
'type': 'TextValue',
'value': 'Custom field value'
}
}
drop_down_custom_field_value = {
'customFieldId': drop_down_custom_field['id'],
'type': 'DropDownCustomFieldValue',
'customFieldOptionId': custom_field_option_id,
}
custom_field_values = [custom_field_value, drop_down_custom_field_value]
old_custom_field_values = []
if 'customFieldValues' in line_item:
old_custom_field_values = line_item['customFieldValues']
# Only add existing custom field values for different custom fields than the
# ones you are setting.
for old_custom_field_value in old_custom_field_values:
if (old_custom_field_value['customFieldId'] != custom_field_value['id']
and old_custom_field_value['customFieldId'] !=
drop_down_custom_field_value['id']):
custom_field_values.append(old_custom_field_value)
line_item['customFieldValues'] = custom_field_values
# Update the line item on the server.
line_items = line_item_service.UpdateLineItems([line_item])
# Display results.
if line_items:
for line_item in line_items:
custom_field_value_strings = []
for value in line_item['customFieldValues']:
if value['BaseCustomFieldValue_Type'] == 'CustomFieldValue':
custom_field_value_string = (
'{ID: \'%s\', value: \'%s\'}'
% (value['customFieldId'], value['value']['value']))
elif value['BaseCustomFieldValue_Type'] == 'DropDownCustomFieldValue':
custom_field_value_string = (
'{ID: \'%s\', custom field option ID: \'%s\'}'
% (value['customFieldId'], value['customFieldOptionId']))
custom_field_value_strings.append(custom_field_value_string)
print ('Line item with ID \'%s\' set with custom field values %s.'
% (line_item['id'], ','.join(custom_field_value_strings)))
else:
print 'No line items were updated.'
else:
print 'Line item or custom field not found.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, CUSTOM_FIELD_ID, DROP_DOWN_CUSTOM_FIELD_ID,
CUSTOM_FIELD_OPTION_ID, LINE_ITEM_ID)
| donspaulding/adspygoogle | examples/adspygoogle/dfp/v201204/custom_field_service/set_line_item_custom_field_value.py | Python | apache-2.0 | 4,733 |
# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2009 Hardy Beltran Monasterios
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import urllib.parse
import rb
import re
import sys
# Deal with html entities and utf-8
# code taken from django/utils/text.py
from html.entities import name2codepoint
pattern = re.compile("&(#?\w+?);")
def _replace_entity(match):
text = match.group(1)
if text[0] == u'#':
text = text[1:]
try:
if text[0] in u'xX':
c = int(text[1:], 16)
else:
c = int(text)
return chr(c)
except ValueError:
return match.group(0)
else:
try:
return chr(name2codepoint[text])
except (ValueError, KeyError):
return match.group(0)
def unescape_entities(text):
return pattern.sub(_replace_entity, text)
class TerraParser (object):
    """Fetch and parse song lyrics from letras.mus.br (Terra)."""

    def __init__(self, artist, title):
        self.artist = artist
        self.title = title

    def search(self, callback, *data):
        """Asynchronously fetch the lyrics page.

        *callback* is invoked with the parsed lyrics (or None) plus *data*
        once the page has been retrieved.
        """
        path = 'http://letras.mus.br/'
        artist = urllib.parse.quote(self.artist)
        title = urllib.parse.quote(self.title)
        join = urllib.parse.quote(' - ')
        wurl = 'winamp.php?t=%s%s%s' % (artist, join, title)
        print("search URL: " + wurl)
        loader = rb.Loader()
        loader.get_url (path + wurl, self.got_lyrics, callback, *data)

    def got_lyrics(self, result, callback, *data):
        """Loader callback: decode the page and forward lyrics (or None).

        The original code re-tested ``result is not None`` after the early
        return, leaving an unreachable else branch; that dead nesting has
        been removed without changing behaviour.
        """
        # No response at all -> report failure.
        if result is None:
            callback (None, *data)
            return
        result = result.decode('utf-8')
        if re.search('Música não encontrada', result):
            # The site's explicit "song not found" marker.
            print("not found")
            callback (None, *data)
        elif re.search('<div id="letra">', result):
            callback(self.parse_lyrics(result), *data)
        else:
            # Unexpected page layout: treat as no lyrics.
            callback (None, *data)

    def parse_lyrics(self, source):
        """Extract an "artist - title" header plus lyric text from the HTML."""
        def unspace(x):
            # Collapse all runs of whitespace to single spaces.
            return " ".join(x.split())

        def untag(x):
            # Replace every markup tag with a newline.
            return re.sub('<.*?>', '\n', x)

        source = re.split('<div id="letra">', source)[1]
        source = re.split('</?div.*?>', source)
        # source[1] = artist+title
        # source[3] = lyrics
        header = "".join(source[1].splitlines())
        # <h1><a>title</a></h1> <h2><a>artist</a></h2>
        bits = re.findall('<h.>(.*?)</h.>', header)
        artistitle = unspace(untag(" - ".join(bits)))
        lyrics = unescape_entities(artistitle) + "\n" + unescape_entities(untag(source[3]))
        lyrics += "\n\nEsta letra foi disponibilizada pelo site\nhttp://letras.mus.br"
        return lyrics
| ruud-v-a/rhythmbox | plugins/lyrics/TerraParser.py | Python | gpl-2.0 | 3,488 |
from unimodel.backends.base import SchemaWriter
import copy
import json
from unimodel import types
from unimodel.backends.json.type_data import get_field_name
from unimodel.util import get_backend_type
"""
Useful: http://www.jsonschema.net/
Example: from http://json-schema.org/example2.html
"""
# Definition emitted for map (dict) types; define_map_field() narrows
# "additionalProperties" to the value type's schema.
MAP_DEFINITION_TEMPLATE = {
    "description": "map",
    "additionalProperties": True,
}

# Definition emitted per struct; callers fill "properties" / "required".
STRUCT_DEFINITION_TEMPLATE = {
    "type": "object",
    "properties": {},  # Fill with field definitions
    "additionalProperties": True,
    "required": [],  # Fill with required field names
}

# Top-level schema document: the struct template plus JSON-Schema
# boilerplate keys.  Built via deepcopy + update() instead of
# dict(a.items() + b.items()), which only worked on Python 2
# (dict.items() views cannot be added with "+" on Python 3).
SCHEMA_TEMPLATE = copy.deepcopy(STRUCT_DEFINITION_TEMPLATE)
SCHEMA_TEMPLATE.update({
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": None,  # Replace with schema description
    "definitions": {}  # Fill struct and map type definitions
})

LIST_TEMPLATE = {
    "type": "array",
    "items": {
        "type": None  # Replace with type reference to definition of elements
    },
    "uniqueItems": False  # set to True for sets
}

JSONDATA_TEMPLATE = {
    "description": "Generic JSONData field",
    "additionalProperties": True
}
class JSONSchemaWriter(SchemaWriter):
    """Serialize unimodel struct classes into a JSON-Schema (draft 04) dict.

    The root struct and every struct it transitively references are emitted
    under "definitions"; the root's own fields also form the top-level
    object's "properties".
    """

    def __init__(self, *args, **kwargs):
        super(JSONSchemaWriter, self).__init__(*args, **kwargs)

    def get_schema_ast(self, root_struct_class):
        """Return the complete schema for *root_struct_class* as a dict."""
        # Collect struct dependencies of root struct (if any).
        struct_dependencies = self.get_dependencies_for_one_struct(
            root_struct_class)
        # Collect struct dependencies of manually added struct classes (if
        # any).
        for struct_class in self.struct_classes:
            self.get_dependencies_for_one_struct(
                struct_class,
                struct_dependencies)
        schema = copy.deepcopy(SCHEMA_TEMPLATE)
        schema['description'] = self.description
        # Note, the root class will be added to the definitions list
        # even if it is only used to describe the top-level object.
        schema['definitions'] = dict(
            [self.get_struct_definition(s) for s in struct_dependencies])
        self.add_struct_properties(root_struct_class, schema)
        return schema

    def get_struct_definition(self, struct_class):
        """ returns (name, definition) pairs """
        struct_def = copy.deepcopy(STRUCT_DEFINITION_TEMPLATE)
        self.add_struct_properties(struct_class, struct_def)
        return (struct_class.get_name(), struct_def)

    def add_struct_properties(self, struct_class, struct_def):
        """Fill *struct_def*'s "properties"/"required" from the struct's
        fields; the empty "required" list is dropped (draft 04 requires a
        non-empty array)."""
        if struct_class.get_field_definitions():
            required = []
            for field in struct_class.get_field_definitions():
                field_name = get_field_name(field)
                struct_def['properties'][
                    field_name] = self.get_type_definition(field.field_type)
                if field.required:
                    required.append(field_name)
            struct_def['required'] = required
        if 'required' in struct_def and not struct_def['required']:
            del struct_def['required']

    def get_type_definition(self, type_definition):
        """Return the JSON-Schema fragment describing a unimodel type.

        Raises Exception for types with no JSON-Schema mapping.
        """
        if isinstance(type_definition, types.Enum):
            return self.define_enum_field(type_definition)
        if isinstance(type_definition, types.NumberTypeMarker):
            return self.define_basic_field(type_definition)
        if isinstance(type_definition, types.StringTypeMarker):
            return self.define_basic_field(type_definition)
        if isinstance(type_definition, types.Bool):
            return self.define_basic_field(type_definition)
        if isinstance(type_definition, types.Struct):
            # Since all the structs were already collected, and are
            # defined in the definitions section, it's enough to refer
            # to the struct here.
            return self.reference_type(type_definition)
        if isinstance(type_definition, types.Map):
            return self.define_map_field(type_definition)
        if isinstance(type_definition, types.List):
            return self.define_array(type_definition)
        if isinstance(type_definition, types.JSONData):
            return copy.deepcopy(JSONDATA_TEMPLATE)
        if isinstance(type_definition, types.Tuple):
            return self.define_array(type_definition)
        raise Exception(
            "Cannot create schema for type %s" %
            str(type_definition))

    def define_basic_field(self, type_definition):
        """Map a primitive unimodel type to its backend JSON type dict."""
        return copy.deepcopy(get_backend_type("json", type_definition.type_id))

    def define_enum_field(self, type_definition):
        """Represent an enum as a JSON-Schema "enum" of its value names."""
        field_def = {'enum': type_definition.names()}
        return field_def

    def reference_type(self, type_definition):
        """Return a "$ref" pointer into the schema's definitions section."""
        return {
            "$ref": "#/definitions/%s" %
            type_definition.get_python_type().get_name()}

    def define_map_field(self, type_definition):
        """
        A map looks something like this:
        (taken from:
        https://github.com/swagger-api/swagger-spec/blob/master/fixtures/v2.0/json/models/modelWithInt32Map.json)
        {
          "description": "This is a Map[String, Integer]",
          "additionalProperties": {
              "type": "integer",
              "format": "int32"
          }
        }"""
        field_def = copy.deepcopy(MAP_DEFINITION_TEMPLATE)
        field_def["description"] = type_definition.get_type_name()
        # JSON object keys are always strings, so only UTF8-keyed maps
        # can be represented.
        if not isinstance(type_definition.type_parameters[0], types.UTF8):
            raise Exception("JSONSchema can only handle maps with UTF8 keys")
        field_def['additionalProperties'] = self.get_type_definition(
            type_definition.type_parameters[1])
        return field_def

    def define_array(self, type_definition):
        """Represent List / Set / Tuple types as a JSON-Schema array.

        A single type parameter (List, Set) yields one "items" schema that
        applies to every element; multiple parameters (Tuple) yield a list
        of positional per-element schemas.
        """
        field_def = copy.deepcopy(LIST_TEMPLATE)
        type_parameter_defs = [
            self.get_type_definition(t)
            for t in type_definition.type_parameters]
        # Bug fix: the original indexed a misspelled name
        # ("type_parameters_def") behind an inverted "len(...) == 0" test,
        # which raised NameError and always emitted a wrapped list even
        # for single-parameter arrays.
        if len(type_parameter_defs) == 1:
            field_def['items'] = type_parameter_defs[0]
        else:
            field_def['items'] = type_parameter_defs
        if isinstance(type_definition, types.Set):
            # Sets forbid duplicate elements.
            field_def['uniqueItems'] = True
        return field_def

    def get_dependencies_for_field_type(self, field_type, struct_dependencies):
        """Recursively add every struct referenced by *field_type* (including
        through container type parameters) to *struct_dependencies*."""
        if isinstance(field_type, types.Struct):
            self.get_dependencies_for_one_struct(
                field_type.get_python_type(),
                struct_dependencies)
        if field_type.type_parameters:
            for type_parameter in field_type.type_parameters:
                self.get_dependencies_for_field_type(
                    type_parameter,
                    struct_dependencies)

    def get_dependencies_for_one_struct(self, cls, struct_dependencies=None):
        """Return the set of interface classes *cls* transitively depends on
        (including itself); safe for recursive struct definitions."""
        # It's possible that struct_class is actually an implementation class
        # In this case, we want the interface class
        struct_dependencies = struct_dependencies or set()
        struct_class = self.model_registry.lookup_interface(cls)
        if struct_class in struct_dependencies:
            # For recursive types, quit if type has already been encountered
            return struct_dependencies
        # recursively traverse the fields of the struct, looking for new
        # dependencies
        struct_dependencies.add(struct_class)
        if struct_class.get_field_definitions():
            for field in struct_class.get_field_definitions():
                self.get_dependencies_for_field_type(
                    field.field_type,
                    struct_dependencies)
        return struct_dependencies

    def get_schema_text(self, *args, **kwargs):
        """Return the schema serialized as pretty-printed, key-sorted JSON."""
        return json.dumps(
            self.get_schema_ast(*args, **kwargs),
            sort_keys=True,
            indent=4,
            separators=(',', ': '))
| neumark/unimodel | unimodel/backends/json/schema_writer.py | Python | apache-2.0 | 7,800 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add IntegerField 'mark_auto_read_hide_delay' (default 0,
        blank allowed) to the core_readpreferences table."""
        # Adding field 'ReadPreferences.mark_auto_read_hide_delay'
        db.add_column(u'core_readpreferences', 'mark_auto_read_hide_delay',
                      self.gf('django.db.models.fields.IntegerField')(default=0, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse: drop the 'mark_auto_read_hide_delay' column again."""
        # Deleting field 'ReadPreferences.mark_auto_read_hide_delay'
        db.delete_column(u'core_readpreferences', 'mark_auto_read_hide_delay')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.user': {
'Meta': {'object_name': 'User'},
'address_book': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'avatar_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'email_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hash_codes': ('jsonfield.fields.JSONField', [], {'default': "{'unsubscribe': 'b0755ea163e24093ab9cc0a535b766f5'}", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'register_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'sent_emails': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.article': {
'Meta': {'object_name': 'Article', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'comments_feed_url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_orphaned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'publications'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '512'}),
'url_absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version_description': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.author': {
'Meta': {'unique_together': "(('origin_name', 'website'),)", 'object_name': 'Author'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Author']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identities': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'identities_rel_+'", 'null': 'True', 'to': "orm['core.Author']"}),
'is_unsure': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '7168', 'null': 'True', 'blank': 'True'}),
'origin_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'origin_id_str': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'origin_name': ('django.db.models.fields.CharField', [], {'max_length': '7168', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authors'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'}),
'website_data': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'})
},
'core.baseaccount': {
'Meta': {'object_name': 'BaseAccount'},
'conn_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_conn': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_usable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'options': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseaccount_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'accounts'", 'to': u"orm['base.User']"})
},
'core.basefeed': {
'Meta': {'object_name': 'BaseFeed'},
'closed_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_fetch': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'errors': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'fetch_interval': ('django.db.models.fields.IntegerField', [], {'default': '43200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.BaseItem']"}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'options': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.basefeed_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.baseitem': {
'Meta': {'object_name': 'BaseItem'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authored_items'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Author']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseItem']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseitem_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sources_rel_+'", 'null': 'True', 'to': "orm['core.BaseItem']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'})
},
'core.chaineditem': {
'Meta': {'object_name': 'ChainedItem'},
'chain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'chained_items'", 'to': "orm['core.ProcessingChain']"}),
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'item_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'notes_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parameters': ('yamlfield.fields.YAMLField', [], {'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.chaineditemparameter': {
'Meta': {'object_name': 'ChainedItemParameter'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'instance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ChainedItem']"}),
'notes_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parameters': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'core.combinedfeed': {
'Meta': {'object_name': 'CombinedFeed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.combinedfeedrule': {
'Meta': {'ordering': "('position',)", 'object_name': 'CombinedFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'combinedfeed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.CombinedFeed']"}),
'feeds': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.BaseFeed']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.folder': {
'Meta': {'unique_together': "(('name', 'user', 'parent'),)", 'object_name': 'Folder'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'folders'", 'to': u"orm['base.User']"})
},
'core.helpcontent': {
'Meta': {'ordering': "['ordering', 'id']", 'object_name': 'HelpContent'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_en': ('django.db.models.fields.TextField', [], {}),
'content_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'core.helpwizards': {
'Meta': {'object_name': 'HelpWizards'},
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'wizards'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_all': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'welcome_beta_shown': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'core.historicalarticle': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalArticle'},
u'baseitem_ptr_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'comments_feed_url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_orphaned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'db_index': 'True'}),
'url_absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'version_description': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.historyentry': {
'Meta': {'object_name': 'HistoryEntry'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.historyentry_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.homepreferences': {
'Meta': {'object_name': 'HomePreferences'},
'experimental_features': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'home'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_shows': ('django.db.models.fields.IntegerField', [], {'default': '2', 'blank': 'True'}),
'show_advanced_preferences': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'style': ('django.db.models.fields.CharField', [], {'default': "u'RL'", 'max_length': '2', 'blank': 'True'})
},
'core.language': {
'Meta': {'object_name': 'Language'},
'dj_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '16'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso639_1': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'iso639_2': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'iso639_3': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Language']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.mailaccount': {
'Meta': {'object_name': 'MailAccount', '_ormbases': ['core.BaseAccount']},
u'baseaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseAccount']", 'unique': 'True', 'primary_key': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'password': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'use_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'core.mailfeed': {
'Meta': {'object_name': 'MailFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mail_feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.MailAccount']"}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'match_action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'rules_operation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
'core.mailfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'MailFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'header_field': ('django.db.models.fields.IntegerField', [], {'default': '4', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mailfeed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['core.MailFeed']"}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_type': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'other_header': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.nodepermissions': {
'Meta': {'object_name': 'NodePermissions'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SyncNode']", 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'0283814071e540d8b5b87ce626b21042'", 'max_length': '32', 'blank': 'True'})
},
'core.originaldata': {
'Meta': {'object_name': 'OriginalData'},
'feedparser': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feedparser_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'google_reader': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'google_reader_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'original_data'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.BaseItem']"}),
'raw_email': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'raw_email_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'twitter': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'twitter_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.preferences': {
'Meta': {'object_name': 'Preferences'},
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['base.User']", 'unique': 'True', 'primary_key': 'True'})
},
'core.processingchain': {
'Meta': {'object_name': 'ProcessingChain'},
'applies_on': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'processor_chains'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.ProcessorCategory']"}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ProcessingChain']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'processor_chains'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Language']"}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.ProcessingChain']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'processor_chains'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.processingerror': {
'Meta': {'object_name': 'ProcessingError'},
'chain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'errors'", 'null': 'True', 'to': "orm['core.ProcessingChain']"}),
'data': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'instance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'is_temporary': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'issue_ref': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'processor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'errors'", 'null': 'True', 'to': "orm['core.ChainedItem']"})
},
'core.processor': {
'Meta': {'object_name': 'Processor'},
'accept_code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'processors'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.ProcessorCategory']"}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Processor']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'processors'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Language']"}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'maintainer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'maintained_processors'", 'null': 'True', 'to': u"orm['base.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'parameters': ('yamlfield.fields.YAMLField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Processor']"}),
'process_code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'requirements': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'source_address': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'processors'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.processorcategory': {
'Meta': {'object_name': 'ProcessorCategory'},
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'maintainer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'maintained_categories'", 'null': 'True', 'to': u"orm['base.User']"}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.ProcessorCategory']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'source_address': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'processor_categories'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.read': {
'Meta': {'unique_together': "(('user', 'item'),)", 'object_name': 'Read'},
'bookmark_type': ('django.db.models.fields.CharField', [], {'default': "u'U'", 'max_length': '2'}),
'check_set_subscriptions_131004_done': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_analysis': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_archived': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_auto_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_bookmarked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_fact': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_fun': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowhow': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowledge': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_number': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_prospective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_quote': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_rules': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_starred': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_analysis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_auto_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_fact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fun': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_knowhow': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_number': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_prospective': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_quote': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_rules': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_starred': ('django.db.models.fields.NullBooleanField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reads'", 'to': "orm['core.BaseItem']"}),
'knowledge_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'senders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reads_sent'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_reads'", 'to': u"orm['base.User']"})
},
'core.readpreferences': {
'Meta': {'object_name': 'ReadPreferences'},
'auto_mark_read_delay': ('django.db.models.fields.IntegerField', [], {'default': '4500', 'blank': 'True'}),
'bookmarked_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bookmarked_marks_unread': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mark_auto_read_hide_delay': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'read'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_switches_to_fullscreen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'reading_speed': ('django.db.models.fields.IntegerField', [], {'default': '200', 'blank': 'True'}),
'show_bottom_navbar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'starred_removes_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'watch_attributes_mark_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.rssatomfeed': {
'Meta': {'object_name': 'RssAtomFeed', '_ormbases': ['core.BaseFeed']},
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'last_etag': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '512'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': "orm['core.WebSite']"})
},
'core.selectorpreferences': {
'Meta': {'object_name': 'SelectorPreferences'},
'extended_folders_depth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'folders_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lists_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'selector'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_closed_streams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscriptions_in_multiple_folders': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'titles_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.sharepreferences': {
'Meta': {'object_name': 'SharePreferences'},
'default_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'share'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"})
},
'core.simpletag': {
'Meta': {'unique_together': "(('name', 'language'),)", 'object_name': 'SimpleTag'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'origin_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'origin_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.SimpleTag']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.snappreferences': {
'Meta': {'object_name': 'SnapPreferences'},
'default_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'snap'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'select_paragraph': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.staffpreferences': {
'Meta': {'object_name': 'StaffPreferences'},
'no_home_redirect': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'staff'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'reading_lists_show_bad_articles': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'selector_shows_admin_links': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'super_powers_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'core.subscription': {
'Meta': {'unique_together': "(('feed', 'user'),)", 'object_name': 'Subscription'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'blank': 'True', 'to': "orm['core.BaseFeed']"}),
'folders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Folder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reads': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Read']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_subscriptions'", 'blank': 'True', 'to': u"orm['base.User']"})
},
'core.syncnode': {
'Meta': {'object_name': 'SyncNode'},
'broadcast': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_seen': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_local_instance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'local_token': ('django.db.models.fields.CharField', [], {'default': "'8006de42e4014bbabd39e74071821109'", 'max_length': '32', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'remote_token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'blank': 'True'}),
'strategy': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'sync_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '384', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'core.tweet': {
'Meta': {'object_name': 'Tweet', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'entities': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tweets'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.BaseItem']"}),
'entities_fetched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mentions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mentions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Author']"}),
'tweet_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'unique': 'True', 'blank': 'True'})
},
'core.twitteraccount': {
'Meta': {'object_name': 'TwitterAccount', '_ormbases': ['core.BaseAccount']},
u'baseaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseAccount']", 'unique': 'True', 'primary_key': 'True'}),
'fetch_owned_lists': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fetch_subscribed_lists': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'social_auth': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'twitter_account'", 'unique': 'True', 'to': u"orm['default.UserSocialAuth']"}),
'timeline': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'twitter_account'", 'unique': 'True', 'null': 'True', 'to': "orm['core.TwitterFeed']"})
},
'core.twitterfeed': {
'Meta': {'object_name': 'TwitterFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'twitter_feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.TwitterAccount']"}),
'backfill_completed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'is_backfilled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_action': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'rules_operation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'track_locations': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'track_terms': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'core.twitterfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'TwitterFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.TwitterFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_field': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_type': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'other_field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'twitterfeed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['core.TwitterFeed']"})
},
'core.usercounters': {
'Meta': {'object_name': 'UserCounters'},
'placeholder': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_counters'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"})
},
'core.userfeeds': {
'Meta': {'object_name': 'UserFeeds'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_feeds'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"})
},
'core.userimport': {
'Meta': {'object_name': 'UserImport', '_ormbases': ['core.HistoryEntry']},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'historyentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.HistoryEntry']", 'unique': 'True', 'primary_key': 'True'}),
'lines': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'results': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'urls': ('django.db.models.fields.TextField', [], {})
},
'core.usersubscriptions': {
'Meta': {'object_name': 'UserSubscriptions'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'blogs'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Subscription']"}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_subscriptions'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"})
},
'core.website': {
'Meta': {'object_name': 'WebSite'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'fetch_limit_nr': ('django.db.models.fields.IntegerField', [], {'default': '16', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'mail_warned': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.WebSite']"}),
'processing_chain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'websites'", 'null': 'True', 'to': "orm['core.ProcessingChain']"}),
'processing_parameters': ('yamlfield.fields.YAMLField', [], {'null': 'True', 'blank': 'True'}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'})
},
u'default.usersocialauth': {
'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'UserSocialAuth', 'db_table': "'social_auth_usersocialauth'"},
'extra_data': ('social.apps.django_app.default.fields.JSONField', [], {'default': "'{}'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_auth'", 'to': u"orm['base.User']"})
}
}
complete_apps = ['core'] | WillianPaiva/1flow | oneflow/core/migrations/0121_auto__add_field_readpreferences_mark_auto_read_hide_delay.py | Python | agpl-3.0 | 73,381 |
# -*- coding: utf-8 -*-
from scrapy.selector import Selector
import scrapy
from scrapy.contrib.loader import ItemLoader
from fun.items import CoserItem
class CoserSpider(scrapy.Spider):
    """Crawl coser profile pages on bcy.net and scrape each listed work.

    ``parse`` walks a profile's work list and schedules one request per work;
    ``parse_item`` extracts the work's metadata and full-size image URLs.
    """
    name = "coser"
    allowed_domains = ["bcy.net"]
    start_urls = (
        'http://bcy.net/cn125101',
        'http://bcy.net/cn126487',
        'http://bcy.net/cn126173'
    )
    def parse(self, response):
        """Yield a request for every work linked from a profile page."""
        selector = Selector(response)
        hrefs = selector.xpath("//ul[@class='js-articles l-works']/li[@class='l-work--big']/article[@class='work work--second-created']/h2[@class='work__title']/a/@href").extract()
        for href in hrefs:
            # Links in the listing are relative; prefix the site root.
            yield scrapy.Request('http://bcy.net%s' % href, callback=self.parse_item)
    def parse_item(self, response):
        """Populate a CoserItem from a single work detail page."""
        loader = ItemLoader(item=CoserItem(), response=response)
        loader.add_xpath('name', "//h1[@class='js-post-title']/text()")
        loader.add_xpath('info', "//div[@class='post__info']/div[@class='post__type post__info-group']/span/text()")
        # Strip the '/w650' thumbnail suffix to get the full-resolution image.
        full_urls = [src.replace('/w650', '')
                     for src in loader.get_xpath('//img[@class="detail_std detail_clickable"]/@src')]
        loader.add_value('image_urls', full_urls)
        loader.add_value('url', response.url)
        return loader.load_item()
| lhbpping/climb-picture | fun_crawler/fun/spiders/coser.py | Python | apache-2.0 | 1,280 |
# Library version string; bump on each release.
VERSION = "2.0.5"
| akesterson/dpath-python | dpath/version.py | Python | mit | 18 |
"""Identify windows with very high depth for potential filtering.
In non-targeted experiments, high depth regions are often due to collapsed repeats
or other structure which can create long run times and incorrect results in
small and structural variant calling.
"""
import os
import shutil
import subprocess
import sys
import numpy
import yaml
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
def _get_files(data):
    """Return (alignment BAM, high-depth BED, stats YAML) paths for a sample.

    Falls back to the work BAM when no aligned BAM is recorded. The BED and
    YAML paths are derived from the BAM's base name.
    """
    bam_file = dd.get_align_bam(data)
    if not bam_file:
        bam_file = dd.get_work_bam(data)
    bed_file = "%s-highdepth.bed" % utils.splitext_plus(bam_file)[0]
    yaml_file = "%s-stats.yaml" % utils.splitext_plus(bed_file)[0]
    return bam_file, bed_file, yaml_file
def identify(data):
    """Identify high depth regions in the alignment file for potential filtering.

    Estimates the sample's median windowed coverage with sambamba, then flags
    windows whose depth exceeds ``high_multiplier`` times that median. Only
    runs for whole genome analyses (coverage_interval == "genome"). Returns
    the path to the merged high-depth BED file, or None if none was produced.
    """
    high_multiplier = 20    # high-depth threshold = 20x the median coverage
    sample_size = int(1e6)  # number of windows sampled to estimate the median
    high_percentage = 25.0  # minimum value of sambamba's column 6 to flag a window
    min_coverage = 10       # ignore windows below this depth when sampling
    window_size = 250       # bp per sambamba depth window
    work_bam, out_file, stats_file = _get_files(data)
    if not os.path.exists(out_file) and dd.get_coverage_interval(data) == "genome":
        cores = dd.get_num_cores(data)
        with file_transaction(data, out_file) as tx_out_file:
            tx_raw_file = "%s-raw%s" % utils.splitext_plus(tx_out_file)
            # 'py' helper from the pythonpy package, installed next to python.
            py_cl = os.path.join(os.path.dirname(sys.executable), "py")
            # Median of mean-coverage values over the first sample_size windows.
            cmd = ("sambamba depth window -t {cores} -c {min_coverage} "
                   "--window-size {window_size} {work_bam} "
                   "| head -n {sample_size} "
                   """| cut -f 5 | {py_cl} -l 'numpy.median([float(x) for x in l if not x.startswith("mean")])'""")
            try:
                median_cov = float(subprocess.check_output(cmd.format(**locals()), shell=True))
            except ValueError:
                # Empty/odd pipeline output: no usable median for this sample.
                median_cov = None
            if median_cov and not numpy.isnan(median_cov):
                high_thresh = int(high_multiplier * median_cov)
                # Keep windows where column 6 (presumably % bases over the -T
                # threshold in sambamba's output -- TODO confirm) is high enough.
                cmd = ("sambamba depth window -t {cores} -c {median_cov} "
                       "--window-size {window_size} -T {high_thresh} {work_bam} "
                       "| {py_cl} -fx 'float(x.split()[5]) >= {high_percentage} "
                       """if not x.startswith("#") else None' """
                       "| cut -f 1-3,7 > {tx_raw_file} ")
                do.run(cmd.format(**locals()), "Identify high coverage regions")
                # Persist the median so get_median_coverage can reuse it.
                with open(stats_file, "w") as out_handle:
                    yaml.safe_dump({"median_cov": median_cov}, out_handle,
                                   allow_unicode=False, default_flow_style=False)
            else:
                with open(tx_raw_file, "w") as out_handle:
                    out_handle.write("")
            if utils.file_exists(tx_raw_file):
                # Merge adjacent flagged windows into single BED intervals.
                cmd = "bedtools merge -i {tx_raw_file} -c 4 -o distinct > {tx_out_file}"
                do.run(cmd.format(**locals()), "Clean up raw coverage file")
            else:
                # Nothing flagged: promote the empty raw file as the output.
                shutil.move(tx_raw_file, tx_out_file)
    return out_file if os.path.exists(out_file) else None
def get_median_coverage(data):
    """Return the median coverage cached by ``identify`` for this sample.

    Reads the stats YAML written alongside the high-depth BED file. Returns 0
    when the stats file is missing, empty, or lacks a usable ``median_cov``
    value (previously an empty or partial file raised KeyError/TypeError).
    """
    stats_file = _get_files(data)[-1]
    if not utils.file_exists(stats_file):
        return 0
    with open(stats_file) as in_handle:
        stats = yaml.safe_load(in_handle)
    # Guard against empty or partially-written stats files.
    if not stats or stats.get("median_cov") is None:
        return 0
    return stats["median_cov"]
| Cyberbio-Lab/bcbio-nextgen | bcbio/bam/highdepth.py | Python | mit | 3,368 |
from bot import data as botData # noqa: F401
from .message import Message
from .permissions import ChatPermissionSet, WhisperPermissionSet
from .. import cache # noqa: F401
from .. import database as databaseM # noqa: F401
from bot.twitchmessage import IrcMessageTagsReadOnly
from datetime import datetime
from typing import Awaitable, Callable, Iterable, List, NamedTuple
from typing import Optional, Union
# Callback used to send one message, or an iterable of messages, to chat.
Send = Callable[[Union[str, Iterable[str]]], None]
class ChatCommandArgs(NamedTuple):
    """Bundled arguments passed to a chat (channel) command handler."""
    data: 'cache.CacheStore'            # cache/database access layer
    chat: 'botData.Channel'             # channel the triggering message arrived in
    tags: IrcMessageTagsReadOnly        # IRCv3 tags of the triggering message
    nick: str                           # sender's nickname
    message: Message                    # parsed message content
    permissions: ChatPermissionSet      # sender's permissions in this channel
    timestamp: datetime                 # when the message was received
class WhisperCommandArgs(NamedTuple):
    """Bundled arguments passed to a whisper (direct message) command handler."""
    data: 'cache.CacheStore'            # cache/database access layer
    nick: str                           # sender's nickname
    message: Message                    # parsed message content
    permissions: WhisperPermissionSet   # sender's whisper permissions
    timestamp: datetime                 # when the whisper was received
class CustomFieldArgs(NamedTuple):
    """Arguments given to a custom-command field substitution handler."""
    field: str                          # field name being substituted
    param: Optional[str]                # parameter attached to the field, if any
    prefix: Optional[str]               # text prepended when the field resolves
    suffix: Optional[str]               # text appended when the field resolves
    default: Optional[str]              # fallback text when the field is empty
    message: Message                    # message that triggered the command
    channel: str                        # channel name
    nick: str                           # sender's nickname
    permissions: ChatPermissionSet      # sender's permissions
    timestamp: datetime                 # when the message was received
class CustomProcessArgs(NamedTuple):
    """Arguments given to a custom-command post-processing handler.

    ``messages`` is mutable in place: processors may rewrite the outgoing
    message list before it is sent.
    """
    data: 'cache.CacheStore'            # cache/database access layer
    chat: 'botData.Channel'             # channel the command ran in
    tags: IrcMessageTagsReadOnly        # IRCv3 tags of the triggering message
    nick: str                           # sender's nickname
    permissions: ChatPermissionSet      # sender's permissions
    broadcaster: str                    # broadcaster the command belongs to
    level: str                          # permission level of the command
    command: str                        # command name
    messages: List[str]                 # outgoing messages to (re)process
class ManageBotArgs(NamedTuple):
    """Arguments passed to a bot-management (!managebot) command handler."""
    data: 'cache.CacheStore'                                # cache/database access layer
    permissions: Union[ChatPermissionSet, WhisperPermissionSet]  # chat or whisper permissions
    send: Send                                              # callback for replying
    nick: str                                               # sender's nickname
    message: Message                                        # parsed message content
# Handler signatures. Command handlers return True when the command was
# handled; field handlers return the substituted text or None.
ChatCommand = Callable[[ChatCommandArgs], Awaitable[bool]]
WhisperCommand = Callable[[WhisperCommandArgs], Awaitable[bool]]
CustomCommandField = Callable[[CustomFieldArgs], Awaitable[Optional[str]]]
CustomCommandProcess = Callable[[CustomProcessArgs], Awaitable[None]]
ManageBotCommand = Callable[[ManageBotArgs], Awaitable[bool]]
class CustomCommand(NamedTuple):
    """A stored custom command: its message template, owner, and level."""
    message: str        # response template (may contain substitution fields)
    broadcaster: str    # broadcaster the command belongs to
    level: str          # permission level required to use the command
class CommandActionTokens(NamedTuple):
    """Tokenized pieces of a custom-command management invocation."""
    action: str             # management action (e.g. add/edit/delete)
    broadcaster: str        # broadcaster the command belongs to
    level: Optional[str]    # permission level, if specified
    command: str            # command name being managed
    text: str               # remaining text (the command's message)
class CustomFieldParts(NamedTuple):
    """Parsed pieces of one substitution field in a custom-command template."""
    plainText: str              # literal text preceding the field
    field: Optional[str]        # field name, None for a trailing literal part
    format: Optional[str]  # noqa: E701  # formatting specifier for the field
    prefix: Optional[str]       # text prepended when the field resolves
    suffix: Optional[str]       # text appended when the field resolves
    param: Optional[str]        # parameter attached to the field
    default: Optional[str]      # fallback text when the field is empty
    original: Optional[str]     # raw source text of the whole field
| MeGotsThis/BotGotsThis | lib/data/_types.py | Python | gpl-3.0 | 2,364 |
import sys
from qt import *
from kuralib import kuraapp
from kuralib.lng_lex import *
from kuragui.guitabdialog import guiTabDialog
from kuragui.guilistview import guiListView
from kuragui.guidetaillist import guiDetailList
from kuragui.guicombobox import guiComboBox
from kuragui.constants import *
from kuragui.guilineedit import guiLineEdit
from kuragui.guiconfig import guiConf
from formtags import TagsTab
from wdglexeme import tabLexeme
from dlglexeme import dlgLexeme
from dlglexchooser import dlgLexChooser
from resource import *
from dbobj.dbexceptions import dbError
class tabElementSplit(QWidget):
    """Dialog tab for splitting a linguistic element into morphemes.

    The user types the element text with '.' between morphemes; pressing the
    button deletes any existing child elements and recreates them, one per
    morpheme. Existing children are shown in a tree view and can be opened
    for editing. NOTE: Python 2 / PyQt3 code.
    """
    def __init__(self, parent, parentRecord):
        QWidget.__init__(self, parent)
        # The lng_element record whose morphemes this tab manages.
        self.parentRecord=parentRecord
        wdgElementLayout = QGridLayout(self)
        wdgElementLayout.setSpacing(6)
        wdgElementLayout.setMargin(11)
        self.lblElement = QLabel(self,'lblElement')
        self.lblElement.setText("Separate the morphemes with a dot.")
        wdgElementLayout.addWidget(self.lblElement,0,0)
        # Editable element text, pre-filled from the parent record.
        self.txtElement = guiLineEdit(self)
        self.txtElement.setFont(guiConf.widgetfont)
        self.txtElement.setText(parentRecord.getFieldValue("text"))
        wdgElementLayout.addWidget(self.txtElement,0,1)
        self.lblSubType=QLabel(self)
        self.lblSubType.setText("Type")
        wdgElementLayout.addWidget(self.lblSubType, 1, 0)
        # Element type chosen for the newly created morphemes.
        self.cmbSubType=guiComboBox(self)
        wdgElementLayout.addWidget(self.cmbSubType, 1, 1)
        self.cmbSubType.fillComboBox(self.parentRecord, "elementtypecode", INSERT)
        self.lblSeparate = QLabel(self,'lblSeparate')
        self.lblSeparate.setText(
            'Nota Bene: all current morpheme data for this element will be deleted.')
        self.lblSeparate.setAlignment(QLabel.WordBreak |
                                      QLabel.AlignVCenter |
                                      QLabel.AlignLeft)
        wdgElementLayout.addWidget(self.lblSeparate,2,0)
        self.bnSeparate = QPushButton(self,'bnSeparate')
        self.bnSeparate.setText('Create morphemes')
        self.connect(self.bnSeparate, SIGNAL("clicked()")
                     , self.slotSplitElement)
        wdgElementLayout.addWidget(self.bnSeparate,2,1)
        # Tree of the element's existing child elements (morphemes).
        self.lsvChildElements = QListView(self,'lsvChildElements')
        self.lsvChildElements.setFont(guiConf.widgetfont)
        self.lsvChildElements.setSorting(-1, FALSE)
        self.lsvChildElements.setRootIsDecorated(TRUE)
        self.lsvChildElements.setShowSortIndicator(FALSE)
        self.lsvChildElements.setTreeStepSize(40)
        self.lsvChildElements.addColumn("Element")
        self.lsvChildElements.setColumnWidthMode(0, QListView.Maximum)
        self.connect(self.lsvChildElements, SIGNAL("returnPressed(QListViewItem *)")
                     , self.slotItemSelected)
        self.connect(self.lsvChildElements, SIGNAL("doubleClicked(QListViewItem *)")
                     , self.slotItemSelected)
        wdgElementLayout.addMultiCellWidget(self.lsvChildElements,3,3,0,1)
        self.refresh()
    def refresh(self):
        """Rebuild the child tree and the item<->record lookup maps."""
        self.item_to_record={}
        self.record_to_item={}
        self.lsvChildElements.clear()
        self.addChildren(self.lsvChildElements, self.parentRecord.elementnr)
    def addChildren(self, parent, elementnr):
        """Recursively add the children of element `elementnr` under `parent`."""
        if elementnr == None:
            return
        # Track the previous sibling so items keep database order in the view.
        previous=None
        for elmt in kuraapp.app.getObjects("lng_element"
                                           , parent_elementnr = elementnr):
            if previous==None:
                item=QListViewItem(parent)
            else:
                item=QListViewItem(parent, previous)
            item.setText(0, elmt.getFieldValue("text"))
            previous=item
            self.item_to_record[item]=elmt
            self.record_to_item[elmt]=item
            self.addChildren(item, elmt.elementnr)
    def slotSplitElement(self):
        """Delete existing morphemes (after confirmation) and create new ones
        from the dot-separated text in the edit field."""
        if self.lsvChildElements.childCount() > 0:
            # Button index 1 == "No": abort without touching the children.
            if QMessageBox.warning(self, "Kura"
                , "This element already has sub-elements. Do you want to delete them?"
                , "Yes", "No"
                ) == 1:
                return
            try:
                self.parentRecord.deleteChildren("lng_element")
            except dbError, dberr:
                QMessageBox.information(self
                    , "Error while deleting"
                    , dberr.errorMessage)
        els=unicode(self.txtElement.text()).split(".")
        # Only create children when there is actually more than one morpheme.
        if len(els) > 1:
            seqnr=0
            for element in els:
                record=kuraapp.app.createObject("lng_element"
                    , streamnr=self.parentRecord.streamnr
                    , seqnr=seqnr
                    , parent_elementnr=self.parentRecord.elementnr
                    , text=element
                    , languagenr=self.parentRecord.languagenr
                    , elementtypecode=self.cmbSubType.currentKey()
                    , usernr=guiConf.usernr
                    )
                record.insert()
                seqnr+=1
        self.refresh()
    def slotItemSelected(self, item):
        """Open the edit dialog for the morpheme behind the activated item."""
        self.dlgOpen = dlgElement(kuraapp.app,
                                  self,
                                  'Edit Morpheme', "Morpheme",
                                  self.item_to_record[item],
                                  UPDATE,
                                  kuraapp.app.tables["lng_element"])
        self.connect(self.dlgOpen, PYSIGNAL("sigAcceptData")
                     , self.slotOpenAccept)
        self.dlgOpen.show()
    def slotOpenAccept(self):
        """Refresh the tree label after the edit dialog was accepted."""
        rec = self.dlgOpen.getMasterRecord()
        item = self.record_to_item[rec]
        item.setText(0, rec.text)
class tabElementLexeme(tabLexeme):
    """Dialog tab linking a linguistic element to a lexicon entry (lng_lex).

    Shows the currently linked lexeme (if any) and lets the user zoom into
    it, pick a different one, or create a new lexicon entry from the element.
    NOTE: Python 2 / PyQt3 code.
    """
    def __init__(self, parent, parentRecord):
        tabLexeme.__init__(self, parent)
        self.parentRecord=parentRecord
        # Load and display the linked lexeme when the element already has one.
        if self.parentRecord.lexnr != None:
            self.lexeme=kuraapp.app.getObject( "lng_lex"
                                             , lexnr=self.parentRecord.lexnr
                                             )
            self.__setValues(self.lexeme)
        else:
            self.lexeme = None
        self.connect(self.bnZoom, SIGNAL("clicked()"), self.__zoom)
        self.connect(self.bnPick, SIGNAL("clicked()"), self.__pick)
        self.connect(self.bnAdd, SIGNAL("clicked()"), self.__add)
    def __setValues(self, lexeme):
        """Copy the lexeme's fields into the (read-only) display widgets."""
        self.txtForm.setFont(guiConf.widgetfont)
        self.txtPhoneticForm.setFont(guiConf.widgetfont)
        self.txtGlosse.setFont(guiConf.widgetfont)
        self.txtDescription.setFont(guiConf.widgetfont)
        self.txtLanguage.setFont(guiConf.widgetfont)
        self.txtForm.setText(lexeme.getFieldValue("form"))
        self.txtPhoneticForm.setText(lexeme.getFieldValue("phonetic_form"))
        self.txtGlosse.setText(lexeme.getFieldValue("glosse"))
        self.txtDescription.setText(lexeme.getFieldValue("description"))
        self.txtLanguage.setText(lexeme.getFieldValue("language"))
    def __zoom(self):
        """Open the full lexeme editor for the currently linked lexeme."""
        if self.lexeme:
            self.dlgZoom=dlgLexeme(kuraapp.app, self, 'Edit lexical item', "Lexeme",
                                   self.lexeme, UPDATE, self.lexeme.tableDef)
            self.connect(self.dlgZoom, PYSIGNAL("sigAcceptData")
                         , self.slotZoomAccept)
            self.dlgZoom.show()
    def slotZoomAccept(self):
        """Take over the edited lexeme and refresh the displayed values."""
        self.lexeme=self.dlgZoom.getMasterRecord()
        self.parentRecord.lexnr=self.lexeme.lexnr
        self.__setValues(self.lexeme)
    def __pick(self):
        #
        # This is a modal dialog
        #
        dlgPick=dlgLexChooser(self, self.parentRecord)
        dlgPick.txtForm.setText(self.parentRecord.getFieldValue("text"))
        dlgPick.refreshSource()
        # exec_loop() == 1 means the chooser was accepted.
        if dlgPick.exec_loop()==1:
            self.lexeme=dlgPick.masterRecord
            self.parentRecord.lexnr=self.lexeme.lexnr
            self.parentRecord.lexeme=self.lexeme.glosse
            self.__setValues(self.lexeme)
    def __add(self):
        """Create a new lexicon entry from the element, after confirmation."""
        # Button index 0 == "Yes".
        if QMessageBox.warning(self, "Kura"
            , "Do you want to add this element to the lexicon?"
            , "Yes", "No", "Cancel"
            , 2, 3
            ) == 0:
            self.lexeme=kuraapp.app.createObject("lng_lex"
                , languagenr=self.parentRecord.languagenr
                , form=self.parentRecord.text
                , usernr=guiConf.usernr
                , phonetic_form=self.parentRecord.getPhoneticTranscription()
                , glosse=self.parentRecord.getGlosse()
                , description=self.parentRecord.getDescription()
                )
            # The glosse column may not be NULL -- substitute a placeholder.
            if self.lexeme.glosse is None:
                self.lexeme.glosse="<empty>"
            self.lexeme.insert()
            self.parentRecord.lexnr=self.lexeme.lexnr
            self.__setValues(self.lexeme)
class dlgElement(guiTabDialog):
    """Tabbed edit dialog for a linguistic element.

    Assembles the generic element form plus Tags, Related Lexeme, and
    Morphemes tabs. NOTE: Python 2 / PyQt3 code.
    """
    def __init__(self, app, parent, title, firstTabTitle,
                 record, mode, tableDef, showChildren=FALSE):
        # NOTE(review): the firstTabTitle and showChildren arguments are
        # accepted but ignored -- hard-coded values are passed instead.
        guiTabDialog.__init__( self
                             , app=app
                             , parent=parent
                             , title=title
                             , firstTabTitle="&Elements"
                             , record=record
                             , mode=mode
                             , tableDef=tableDef
                             , showChildren=FALSE
                             , addBottomSpring=TRUE)
        # Kept as an attribute so accept() can save the tags form.
        self.tagstab=TagsTab(self, app, record, "lng_element_tag")
        guiTabDialog.addChildTab( self
                                , "&Tags"
                                , self.tagstab
                                , record
                                , DETAIL)
        guiTabDialog.addChildTab(self
                                , "&Related Lexeme"
                                , tabElementLexeme(self, record)
                                , record
                                , DETAIL)
        guiTabDialog.addChildTab( self
                                , "&Morphemes"
                                , tabElementSplit(self, record)
                                , record
                                , DETAIL)
    def accept(self):
        """Save the tags form (reporting errors) before the normal accept."""
        try:
            self.tagstab.formTags.save()
        except Exception, e:
            QMessageBox.critical(self, "Error saving tags", unicode(e))
        guiTabDialog.accept(self)
__copyright__="""
copyright : (C) 2002 by Boudewijn Rempt
see copyright notice for license
email : [email protected]
"""
__revision__="""$Revision: 1.10 $"""[11:-2]
| boudewijnrempt/kura | kuraclient/dlgelement.py | Python | bsd-2-clause | 10,478 |
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
from trove.common import cfg
from trove.common import exception
from trove.common import instance as rd_instance
from trove.guestagent import dbaas
from trove.guestagent import backup
from trove.guestagent import volume
from trove.guestagent.datastore.mysql.service import MySqlAppStatus
from trove.guestagent.datastore.mysql.service import MySqlAdmin
from trove.guestagent.datastore.mysql.service import MySqlApp
from trove.guestagent.strategies.replication import get_replication_strategy
from trove.openstack.common import log as logging
from trove.openstack.common.gettextutils import _
from trove.openstack.common import periodic_task
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Datastore manager name, defaulting to 'mysql' when not configured.
MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mysql'
# Replication strategy/namespace come from the datastore-specific config group.
REPLICATION_STRATEGY = CONF.get(MANAGER).replication_strategy
REPLICATION_NAMESPACE = CONF.get(MANAGER).replication_namespace
REPLICATION_STRATEGY_CLASS = get_replication_strategy(REPLICATION_STRATEGY,
                                                      REPLICATION_NAMESPACE)
class Manager(periodic_task.PeriodicTasks):
    """Trove guest agent manager for MySQL instances.

    RPC entry points invoked by the Trove control plane on the guest VM.
    User/database operations delegate to MySqlAdmin; service lifecycle and
    configuration operations delegate to MySqlApp.
    """
    @periodic_task.periodic_task(ticks_between_runs=3)
    def update_status(self, context):
        """Update the status of the MySQL service."""
        MySqlAppStatus.get().update()
    # --- User and database administration: thin delegations to MySqlAdmin ---
    def change_passwords(self, context, users):
        return MySqlAdmin().change_passwords(users)
    def update_attributes(self, context, username, hostname, user_attrs):
        return MySqlAdmin().update_attributes(username, hostname, user_attrs)
    def reset_configuration(self, context, configuration):
        """Reset the MySQL configuration to the given contents."""
        app = MySqlApp(MySqlAppStatus.get())
        app.reset_configuration(configuration)
    def create_database(self, context, databases):
        return MySqlAdmin().create_database(databases)
    def create_user(self, context, users):
        MySqlAdmin().create_user(users)
    def delete_database(self, context, database):
        return MySqlAdmin().delete_database(database)
    def delete_user(self, context, user):
        MySqlAdmin().delete_user(user)
    def get_user(self, context, username, hostname):
        return MySqlAdmin().get_user(username, hostname)
    def grant_access(self, context, username, hostname, databases):
        return MySqlAdmin().grant_access(username, hostname, databases)
    def revoke_access(self, context, username, hostname, database):
        return MySqlAdmin().revoke_access(username, hostname, database)
    def list_access(self, context, username, hostname):
        return MySqlAdmin().list_access(username, hostname)
    def list_databases(self, context, limit=None, marker=None,
                       include_marker=False):
        return MySqlAdmin().list_databases(limit, marker,
                                           include_marker)
    def list_users(self, context, limit=None, marker=None,
                   include_marker=False):
        return MySqlAdmin().list_users(limit, marker,
                                       include_marker)
    def enable_root(self, context):
        return MySqlAdmin().enable_root()
    def is_root_enabled(self, context):
        return MySqlAdmin().is_root_enabled()
    def _perform_restore(self, backup_info, context, restore_location, app):
        """Restore a backup into restore_location, failing the instance on error."""
        LOG.info(_("Restoring database from backup %s.") % backup_info['id'])
        try:
            backup.restore(context, backup_info, restore_location)
        except Exception:
            LOG.exception(_("Error performing restore from backup %s.") %
                          backup_info['id'])
            # Mark the instance FAILED so the control plane sees the error.
            app.status.set_status(rd_instance.ServiceStatuses.FAILED)
            raise
        LOG.info(_("Restored database successfully."))
    def prepare(self, context, packages, databases, memory_mb, users,
                device_path=None, mount_point=None, backup_info=None,
                config_contents=None, root_password=None, overrides=None,
                cluster_config=None):
        """Makes ready DBAAS on a Guest container.

        Installs MySQL, prepares and mounts the data volume, optionally
        restores a backup, secures the installation, and creates any
        requested databases and users.
        """
        MySqlAppStatus.get().begin_install()
        # status end_mysql_install set with secure()
        app = MySqlApp(MySqlAppStatus.get())
        app.install_if_needed(packages)
        if device_path:
            #stop and do not update database
            app.stop_db()
            device = volume.VolumeDevice(device_path)
            # unmount if device is already mounted
            device.unmount_device(device_path)
            device.format()
            if os.path.exists(mount_point):
                #rsync exiting data
                device.migrate_data(mount_point)
            #mount the volume
            device.mount(mount_point)
            LOG.debug("Mounted the volume.")
            app.start_mysql()
        if backup_info:
            self._perform_restore(backup_info, context,
                                  mount_point, app)
        LOG.debug("Securing MySQL now.")
        app.secure(config_contents, overrides)
        # A restored backup may already have root enabled; preserve that.
        enable_root_on_restore = (backup_info and
                                  MySqlAdmin().is_root_enabled())
        if root_password and not backup_info:
            app.secure_root(secure_remote_root=True)
            MySqlAdmin().enable_root(root_password)
        elif enable_root_on_restore:
            app.secure_root(secure_remote_root=False)
            MySqlAppStatus.get().report_root('root')
        else:
            app.secure_root(secure_remote_root=True)
        app.complete_install_or_restart()
        if databases:
            self.create_database(context, databases)
        if users:
            self.create_user(context, users)
        LOG.info(_('Completed setup of MySQL database instance.'))
    # --- Service lifecycle ---
    def restart(self, context):
        app = MySqlApp(MySqlAppStatus.get())
        app.restart()
    def start_db_with_conf_changes(self, context, config_contents):
        app = MySqlApp(MySqlAppStatus.get())
        app.start_db_with_conf_changes(config_contents)
    def stop_db(self, context, do_not_start_on_reboot=False):
        app = MySqlApp(MySqlAppStatus.get())
        app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
    def get_filesystem_stats(self, context, fs_path):
        """Gets the filesystem stats for the path given."""
        # NOTE(review): fs_path is ignored; the configured mount point is used.
        mount_point = CONF.get(MANAGER).mount_point
        return dbaas.get_filesystem_volume_stats(mount_point)
    def create_backup(self, context, backup_info):
        """
        Entry point for initiating a backup for this guest agents db instance.
        The call currently blocks until the backup is complete or errors. If
        device_path is specified, it will be mounted based to a point specified
        in configuration.
        :param backup_info: a dictionary containing the db instance id of the
                            backup task, location, type, and other data.
        """
        backup.backup(context, backup_info)
    # --- Volume management ---
    def mount_volume(self, context, device_path=None, mount_point=None):
        device = volume.VolumeDevice(device_path)
        device.mount(mount_point, write_to_fstab=False)
        LOG.debug("Mounted the device %s at the mount point %s." %
                  (device_path, mount_point))
    def unmount_volume(self, context, device_path=None, mount_point=None):
        device = volume.VolumeDevice(device_path)
        device.unmount(mount_point)
        LOG.debug("Unmounted the device %s from the mount point %s." %
                  (device_path, mount_point))
    def resize_fs(self, context, device_path=None, mount_point=None):
        device = volume.VolumeDevice(device_path)
        device.resize_fs(mount_point)
        LOG.debug("Resized the filesystem %s." % mount_point)
    # --- Configuration overrides ---
    def update_overrides(self, context, overrides, remove=False):
        LOG.debug("Updating overrides (%s)." % overrides)
        app = MySqlApp(MySqlAppStatus.get())
        app.update_overrides(overrides, remove=remove)
    def apply_overrides(self, context, overrides):
        LOG.debug("Applying overrides (%s)." % overrides)
        app = MySqlApp(MySqlAppStatus.get())
        app.apply_overrides(overrides)
    # --- Replication ---
    def get_replication_snapshot(self, context, snapshot_info):
        """Enable this instance as a master and return a snapshot descriptor."""
        LOG.debug("Getting replication snapshot.")
        app = MySqlApp(MySqlAppStatus.get())
        replication = REPLICATION_STRATEGY_CLASS(context)
        replication.enable_as_master(app, snapshot_info)
        snapshot_id, log_position = (
            replication.snapshot_for_replication(context, app, None,
                                                 snapshot_info))
        mount_point = CONF.get(MANAGER).mount_point
        volume_stats = dbaas.get_filesystem_volume_stats(mount_point)
        replication_snapshot = {
            'dataset': {
                'datastore_manager': MANAGER,
                'dataset_size': volume_stats.get('used', 0.0),
                'volume_size': volume_stats.get('total', 0.0),
                'snapshot_id': snapshot_id
            },
            'replication_strategy': REPLICATION_STRATEGY,
            'master': replication.get_master_ref(app, snapshot_info),
            'log_position': log_position
        }
        return replication_snapshot
    def _validate_slave_for_replication(self, context, snapshot):
        """Reject snapshots with a mismatched strategy or insufficient space."""
        if (snapshot['replication_strategy'] != REPLICATION_STRATEGY):
            raise exception.IncompatibleReplicationStrategy(
                snapshot.update({
                    'guest_strategy': REPLICATION_STRATEGY
                }))
        mount_point = CONF.get(MANAGER).mount_point
        volume_stats = dbaas.get_filesystem_volume_stats(mount_point)
        if (volume_stats.get('total', 0.0) <
                snapshot['dataset']['dataset_size']):
            raise exception.InsufficientSpaceForReplica(
                snapshot.update({
                    'slave_volume_size': volume_stats.get('total', 0.0)
                }))
    def attach_replication_slave(self, context, snapshot, slave_config):
        """Configure this instance as a replication slave of the snapshot's master."""
        LOG.debug("Attaching replication snapshot.")
        app = MySqlApp(MySqlAppStatus.get())
        try:
            self._validate_slave_for_replication(context, snapshot)
            replication = REPLICATION_STRATEGY_CLASS(context)
            replication.enable_as_slave(app, snapshot)
        except Exception:
            LOG.exception("Error enabling replication.")
            app.status.set_status(rd_instance.ServiceStatuses.FAILED)
            raise
    def detach_replica(self, context):
        LOG.debug("Detaching replica.")
        app = MySqlApp(MySqlAppStatus.get())
        replication = REPLICATION_STRATEGY_CLASS(context)
        replication.detach_slave(app)
    def demote_replication_master(self, context):
        LOG.debug("Demoting replication master.")
        app = MySqlApp(MySqlAppStatus.get())
        replication = REPLICATION_STRATEGY_CLASS(context)
        replication.demote_master(app)
| changsimon/trove | trove/guestagent/datastore/mysql/manager.py | Python | apache-2.0 | 11,645 |
from time import sleep
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
from copy import deepcopy
import requests
import traceback
from typing import Dict, Tuple
from datetime import timezone
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''

DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'  # ISO8601 format with UTC, default in XSOAR

# Maps the human-readable importance argument to the API's numeric string.
IMPORTANCE_DICTIONARY = {
    'Low': '0',
    'Medium': '1',
    'High': '2'
}

# Maps the human-readable "ongoing" argument to the API's boolean string.
ONGOING_DICTIONARY = {
    'Ongoing': 'true',
    'Not Ongoing': 'false',
}

# Maps IP version labels to the numeric ip_version the API expects.
IP_DICTIONARY = {
    'IPv4': 4,
    'IPv6': 6
}

# Column order for the routers human-readable table.
ROUTERS_HR_HEADERS = [
    'id',
    'name',
    'description',
    'is_proxy',
    'license_type',
    'snmp_authprotocol',
    'snmp_priv_protocol',
    'snmp_security_level',
    'snmp_version',
]

# Column order for the managed-objects human-readable table.
MANAGED_OBJECTS_HR_HEADERS = [
    'id',
    'name',
    'tags',
    'match_type',
    'match_enabled',
    'match',
    'family',
    'autodetected'
]
''' CLIENT CLASS '''
class NetscoutClient(BaseClient):
    """Client class to interact with the service API

    This Client implements API calls, and does not contain any XSOAR logic.
    Should only do requests and return data.
    It inherits from BaseClient defined in CommonServer Python.
    Most calls use _http_request() that handles proxy, SSL verification, etc.
    """

    OPERATOR_NAME_DICTIONARY = {
        # <parm name>: <argument operator name>
        'importance': 'importance_operator',
        'start_time': 'start_time_operator',
        'stop_time': 'stop_time_operator',
    }

    # In some cases a relationship name differs from its API object type.
    RELATIONSHIP_TO_TYPE = {
        'routers': 'router'
    }

    MAX_ALERTS_FOR_FIRST_FETCH = 10000

    def __init__(self, base_url, verify, proxy, first_fetch, headers=None, max_fetch=None, alert_class=None,
                 alert_type=None, classification=None, importance=None, ongoing=None):
        """Store the fetch configuration and initialize the underlying BaseClient."""
        self.first_fetch = first_fetch
        self.max_fetch = max_fetch
        self.alert_class = alert_class
        self.alert_type = alert_type
        self.classification = classification
        self.importance = importance
        self.ongoing = ongoing
        # Fetch always filters for importance strictly greater than the
        # configured minimal value (see cast_importance_to_minimal).
        self.importance_operator = '>'
        super().__init__(base_url=base_url, verify=verify, headers=headers, proxy=proxy)

    def http_request(self, method: str, url_suffix: Optional[str] = None, params: Optional[dict] = None,
                     json_data: Optional[dict] = None, return_empty_response: Optional[bool] = None,
                     status_list_to_retry: Optional[list] = None):
        """Thin wrapper over BaseClient._http_request that plugs in our error handler."""
        return super()._http_request(method=method, url_suffix=url_suffix, params=params, json_data=json_data,
                                     error_handler=self.error_handler, return_empty_response=return_empty_response,
                                     status_list_to_retry=status_list_to_retry)

    @staticmethod
    def error_handler(res: requests.Response):
        """
        Error handler for API calls

        Args:
            res (requests.Response): Response to handle error for

        Raises:
            DemistoException: always — with the most detailed message that can
                be extracted from the response body.
        """
        try:
            # Try to parse json error response
            error_entry = res.json()
            error: str = f'Error in API call [{res.status_code}] - {res.reason}'
            # BUG FIX: 'errors' may be absent or empty; the previous code
            # iterated/indexed it unconditionally and could raise
            # TypeError/IndexError, masking the real API error.
            errors = error_entry.get('errors') or []
            if res.status_code in (400, 422, 404):
                error_list: list = []
                for err in errors:
                    # Building the list of errors
                    new_error_source = err.get('source', {}).get('pointer', '').split('/')[-1]
                    new_error_details = err.get('detail')
                    new_error = f'{new_error_source}: {new_error_details}' if new_error_source else new_error_details
                    error_list.append(new_error)
                # If we managed to build a list of errors use it otherwise use basic information
                if error_list:
                    error = f'{error}: \n' + '\n'.join(error_list)
            elif res.status_code in (500, 401):
                message = errors[0].get('message') if errors else None
                if message:
                    error = f'{error}\n{message}'
            demisto.error(res.text)
            raise DemistoException(error)
        except ValueError:
            raise DemistoException(
                f'Could not parse error returned from Netscout Arbor Sightline server:\n{str(res.content)}')

    def calculate_amount_of_incidents(self, start_time: str, params_dict: dict) -> int:
        """
        Perform an API call with page size = 1 (perPage=1) to calculate the amount of incidents (#pages will be equal to
        #incidents).

        Arguments:
            start_time (str): Starting time to search by
            params_dict (dict): The params configured by the user to perform the fetch with.

        Returns:
            (int) The amount of pages (incidents) in total in the given query, 0 if none.
        """
        time_attributes_dict = assign_params(start_time=start_time, start_time_operator='>')
        params_dict.update(time_attributes_dict)
        data_attribute_filter = self.build_data_attribute_filter(params_dict)
        page_size = 1
        results = self.list_alerts(page_size=page_size, search_filter=data_attribute_filter, status_list_to_retry=[500])
        last_page_link = results.get('links', {}).get('last')
        if last_page_link:
            last_page_number_matcher = re.match(r'.*&page=(\d+)', last_page_link)
            if not last_page_number_matcher:
                raise DemistoException(
                    f'Could not calculate page size, last page number was not found:\n{last_page_link}')
            last_page_number = last_page_number_matcher.group(1)
        else:
            last_page_number = 0
        return int(last_page_number)

    def build_relationships(self, **kwargs) -> dict:
        """
        Builds the relationships object for creating a mitigation. An example of relationships object is:
        {
            "mitigation_template": {
                "data": {
                    "id": "4", "type": "mitigation_template"
                }
            },
            "alert": {
                "data": {
                    "id": "101", "type": "alert"
                }
            }
        }

        Args:
            kwargs (dict): Dict containing key values parameters to be used for relationships. for example:
                {'ip_version': 4}

        Returns:
            (dict): Netscout relationships object
        """
        relationships: Dict[str, Any] = {}
        for key, val in kwargs.items():
            if val:
                # In some cases the name of the relationships is not the same as the type (most cases it is)
                _type = self.RELATIONSHIP_TO_TYPE.get(key, key)
                # 'routers' is the only relationship that takes a list of objects.
                if key == 'routers':
                    relationships[key] = {
                        'data': [{
                            'type': _type,
                            'id': val[0]
                        }]
                    }
                else:
                    relationships[key] = {
                        'data': {
                            'type': _type,
                            'id': val
                        }
                    }
        return relationships

    def build_data_attribute_filter(self, attributes_dict: dict) -> str:
        """
        Builds data attribute filter in the NetscoutArbor form. For example: '/data/attributes/importance>1' where
        key=importance operator='>' and value=1.
        The function iterates over all arguments (besides operators listed in the OPERATOR_NAME_DICTIONARY) and chain
        together the 'key operator val' such that the argument name is 'key', its value is 'val' and operator is '=' if
        no relevant operator is present. In case of multiple parameters the attributes are separated with 'AND'.

        Args:
            attributes_dict (dict): Dict containing key values filter parameters. for example: {'importance': 1}

        Returns:
            (str): Netscout data attribute filter string. For example:
                /data/attributes/importance>1 AND /data/attributes/ongoing=true
        """
        param_list = []
        operator_names = self.OPERATOR_NAME_DICTIONARY.values()
        for key, val in attributes_dict.items():
            # We don't create a filter for operator names
            if key not in operator_names and val:
                operator = '='  # type: str
                # If the current parameter supports a special operator (it appears in the OPERATOR_NAME_DICTIONARY),
                # we take the operator value using the operator name (that appears in the OPERATOR_NAME_DICTIONARY)
                if operator_name := self.OPERATOR_NAME_DICTIONARY.get(key):
                    operator = attributes_dict.get(operator_name, '') if attributes_dict.get(
                        operator_name) else '='
                param_list.append(f'/data/attributes/{key + operator + val}')
        return ' AND '.join(param_list)

    def fetch_incidents(self, params_dict: dict) -> Tuple[list, str]:
        """
        Perform fetch incidents process.
        1. We first save the current time to know what was the time at the beginning of the incidents counting process.
        2. We calculate the amount of incidents we need to fetch by performing a query for all incident newer
           than last run (or first fetch), we do this by setting the page size to 1, which makes the amount of returned
           pages to be equal to the amount of incidents.
        3. Then, to get the relevant incidents, we query for all incidents *older* then the time we sampled in the
           step 1, with page size equal to the amount of incidents from step 2. This ensures that the first page in
           this search will have all of the incidents created after the given start time and only them.
        4. Finally out of the relevant incidents we take the older ones (from the end of the list) and set the new
           start time to the creation time of the first incident in the list.

        Args:
            params_dict (dict): The params configured by the user to perform the fetch with.

        Returns:
            (list, str): List of incidents to save and string representing the creation time of the latest incident to
                be saved.
        """
        last_run = demisto.getLastRun()
        new_last_start_time = last_start_time = last_run.get('LastFetchTime', self.first_fetch)
        demisto.debug(f'Last fetch time to use is: {last_start_time}')
        # We calculate the page size to query, by performing an incidents query with page size = 1, the amount of
        # returned pages will equal to amount of incidents
        now = datetime.now(timezone.utc).isoformat()
        amount_of_incidents = self.calculate_amount_of_incidents(start_time=last_start_time, params_dict=params_dict)
        incidents: list = []
        if amount_of_incidents:
            time_attributes_dict = assign_params(start_time=now, start_time_operator='<')
            params_dict.update(time_attributes_dict)
            data_attribute_filter = self.build_data_attribute_filter(params_dict)
            demisto.debug(
                f'NetscoutArborSightline fetch params are: page_size={amount_of_incidents}, '
                f'search_filter={data_attribute_filter}')
            # We use the status_list_to_retry since in some rare cases the API returns 500 error on consecutive API
            # calls.
            results = self.list_alerts(page_size=amount_of_incidents, search_filter=data_attribute_filter,
                                       status_list_to_retry=[500])
            all_alerts = results.get('data', [])
            short_alert_list = all_alerts[-1 * self.max_fetch:]
            if short_alert_list:
                new_last_start_time = short_alert_list[0].get('attributes', {}).get('start_time')
                for alert in reversed(short_alert_list):
                    start_time = alert.get('attributes', {}).get('start_time')
                    alert_type = alert.get('attributes', {}).get('alert_type')
                    incidents.append({
                        'name': f"{alert_type}: {alert.get('id')}",
                        'occurred': start_time,
                        'rawJSON': json.dumps(alert)
                    })
        return incidents, new_last_start_time

    def fetch_incidents_loop(self) -> Tuple[list, str]:
        """
        Calls the fetch incidents function to pull incidents with for each alert_type/alert_class separately.

        Returns:
            (list, str): List of incidents to save and string representing the creation time of the latest incident to
                be saved.
        """
        incidents = []
        params_dict = assign_params(alert_class=self.alert_class, alert_type=self.alert_type,
                                    importance=self.importance, classification=self.classification,
                                    importance_operator=self.importance_operator, ongoing=self.ongoing)
        # alert_class and alert_type are mutually exclusive (enforced in main()).
        if self.alert_type:
            key = 'alert_type'
            class_type_list = self.alert_type
        elif self.alert_class:
            key = 'alert_class'
            class_type_list = self.alert_class
        if self.alert_class or self.alert_type:
            for item in class_type_list:
                params_dict[key] = item
                last_incidents, new_last_start_time = self.fetch_incidents(params_dict)
                incidents += last_incidents
                # Short pause between queries to avoid hammering the API.
                sleep(5)
        else:
            incidents, new_last_start_time = self.fetch_incidents(params_dict)
        return incidents, new_last_start_time

    def list_alerts(self, page: Optional[int] = None, page_size: Optional[int] = None,
                    search_filter: Optional[str] = None, status_list_to_retry: Optional[list] = None) -> dict:
        """GET /alerts with optional paging and filter string."""
        return self.http_request(
            method='GET',
            url_suffix='alerts',
            status_list_to_retry=status_list_to_retry,
            params=assign_params(page=page, perPage=page_size, filter=search_filter)
        )

    def get_alert(self, alert_id: str) -> dict:
        """GET a single alert by id."""
        return self.http_request(
            method='GET',
            url_suffix=f'alerts/{alert_id}'
        )

    def get_annotations(self, alert_id: str) -> dict:
        """GET the annotations of a single alert."""
        return self.http_request(
            method='GET',
            url_suffix=f'alerts/{alert_id}/annotations'
        )

    def list_mitigations(self, mitigation_id: str, page: Optional[int] = None, page_size: Optional[int] = None) -> dict:
        """GET one mitigation by id, or a page of all mitigations when id is empty."""
        return self.http_request(
            method='GET',
            url_suffix=f'mitigations/{mitigation_id}' if mitigation_id else 'mitigations',
            params=assign_params(page=page, perPage=page_size)
        )

    def create_mitigation(self, data: dict) -> dict:
        """POST a new mitigation object."""
        return self.http_request(
            method='POST',
            url_suffix='mitigations/',
            json_data=data
        )

    def delete_mitigation(self, mitigation_id: str):
        """DELETE a mitigation by id (API returns an empty body on success)."""
        self.http_request(
            method='DELETE',
            url_suffix=f'mitigations/{mitigation_id}',
            return_empty_response=True
        )

    def mitigation_template_list(self) -> dict:
        """GET all mitigation templates."""
        return self.http_request(
            method='GET',
            url_suffix='mitigation_templates/'
        )

    def router_list(self) -> dict:
        """GET all routers."""
        return self.http_request(
            method='GET',
            url_suffix='routers/'
        )

    def managed_object_list(self, page: Optional[int] = None, page_size: Optional[int] = None) -> dict:
        """GET a page of managed objects."""
        return self.http_request(
            method='GET',
            url_suffix='managed_objects/',
            params=assign_params(page=page, perPage=page_size)
        )

    def tms_group_list(self) -> dict:
        """GET all TMS groups."""
        return self.http_request(
            method='GET',
            url_suffix='tms_groups/'
        )
''' HELPER FUNCTIONS '''
def clean_links(target_obj: Union[dict, list]):
    """
    Recursively strip every 'links' entry from a nested dict/list structure.

    Args:
        target_obj (dict/list): An object to remove the links key from.
    """
    if isinstance(target_obj, dict):
        # Drop this level's 'links' entry (if present and non-empty), then recurse.
        if target_obj.get('links'):
            del target_obj['links']
        for value in target_obj.values():
            clean_links(value)
    elif isinstance(target_obj, list):
        for element in target_obj:
            clean_links(element)
def validate_json_arg(json_str: str, arg_name: str) -> dict:
    """
    Parse *json_str* and return the resulting object.

    Args:
        json_str (str): The data to parse
        arg_name (str): The argument name where the data was given (for exception purposes)

    Returns:
        (dict): dict representing the given json

    Raises:
        DemistoException: if the string is not valid JSON.
    """
    try:
        return json.loads(json_str)
    except Exception as err:
        raise DemistoException(
            f'The value given in the {arg_name} argument is not a valid JSON format:\n{json_str}\nERROR:\n{err}')
def remove_keys(obj: dict, keys_to_remove: list):
    """
    Removes the given keys from a given dict, in place.

    Args:
        obj (dict): The object to remove the key from.
        keys_to_remove (list): List of keys to remove.
    """
    for key in keys_to_remove:
        # BUG FIX: the previous check `if obj.get(key)` skipped keys whose
        # value is falsy (None, 0, '', {}, []), leaving them in the dict.
        if key in obj:
            del obj[key]
def flatten_key(obj: dict, key_to_flatten: str):
    """
    Promote the contents of obj[key_to_flatten] to the root level of *obj*,
    removing the original key. No-op when the key is missing or empty.

    Args:
        obj (dict): The object to extract the data from.
        key_to_flatten (str): The key name to extract.
    """
    nested = obj.get(key_to_flatten)
    if nested:
        obj.update(nested)
        del obj[key_to_flatten]
def build_human_readable(data: dict) -> dict:
    """
    Build a flat copy of *data* suitable for human-readable tables: the
    'attributes' sub-dict is lifted to the root level, and the
    'relationships'/'subobject' entries are dropped.

    Args:
        data (dict): The data to create human readable from.

    Returns:
        (dict): The same object without the relationships data and with the attributes extracted to the root level.
    """
    readable = deepcopy(data)
    flatten_key(readable, 'attributes')
    remove_keys(readable, ['relationships', 'subobject'])
    return readable
def build_output(data: dict, extend_data: bool = False, key_to_flat: str = 'attributes',
                 keys_to_remove: list = None) -> dict:
    """
    Build a context-output copy of *data*: 'links' entries are stripped
    recursively, *key_to_flat* (if any) is lifted to the root level, and —
    unless *extend_data* is set — *keys_to_remove* (default:
    ['relationships']) are dropped.
    """
    result = deepcopy(data)
    clean_links(result)
    if key_to_flat:
        flatten_key(result, key_to_flat)
    if not extend_data:
        remove_keys(result, keys_to_remove or ['relationships'])
    return result
def cast_importance_to_minimal(importance: str) -> Optional[str]:
    """
    If a minimal importance param was given, cast it to the corresponding minimal value to be used with the '>'
    operator.
    That is:
        High -> '2' -> '1'
        Medium -> '1' -> '0'
        Low -> '0' -> None (so it will be ignored and will not be used as an importance param)

    Args:
        importance (str): The importance to cast.

    Returns:
        (str): The value to be used with the '>' operator, or None.
    """
    mapped = IMPORTANCE_DICTIONARY.get(importance)
    if not mapped or mapped == '0':
        return None
    return str(int(mapped) - 1)
''' COMMAND FUNCTIONS '''
def test_module(client: NetscoutClient) -> str:
    """Validate connectivity and credentials by running one fetch cycle."""
    client.fetch_incidents_loop()
    return 'ok'
def fetch_incidents_command(client: NetscoutClient):
    """Fetch new alerts as XSOAR incidents and persist the new time checkpoint."""
    incidents, last_start_time = client.fetch_incidents_loop()
    demisto.incidents(incidents)
    demisto.setLastRun({'LastFetchTime': last_start_time})
def list_alerts_command(client: NetscoutClient, args: dict):
    """List alerts matching the given filters, or fetch a single alert when
    alert_id is provided (in which case all other filters are ignored).

    Returns:
        CommandResults with the alerts under NASightline.Alert.
    """
    limit = arg_to_number(args.get('limit'))
    page = arg_to_number(args.get('page'))
    alert_id = args.get('alert_id')
    alert_class = args.get('alert_class')
    alert_type = args.get('alert_type')
    classification = args.get('classification')
    importance = IMPORTANCE_DICTIONARY.get(args.get('importance', ''))
    importance_operator = args.get('importance_operator')
    ongoing = args.get('ongoing') if args.get('ongoing') else None
    start_time = args.get('start_time')
    start_time_operator = args.get('start_time_operator')
    stop_time = args.get('stop_time')
    stop_time_operator = args.get('stop_time_operator')
    managed_object_id = args.get('managed_object_id')
    extend_data = argToBoolean(args.get('extend_data', False))
    if alert_id:
        raw_result = client.get_alert(alert_id)
    else:
        attributes_dict = assign_params(alert_id=alert_id, alert_class=alert_class, alert_type=alert_type,
                                        classification=classification, importance=importance,
                                        importance_operator=importance_operator, ongoing=ongoing, start_time=start_time,
                                        start_time_operator=start_time_operator, stop_time=stop_time,
                                        stop_time_operator=stop_time_operator)
        data_attribute_filter = client.build_data_attribute_filter(attributes_dict)
        # BUG FIX: the attribute and relationship filters used to be
        # concatenated with no separator (producing ".../ongoing=trueAND
        # /data/relationships/...") and a lone managed_object_id produced a
        # filter starting with "AND ". Join non-empty parts with " AND ".
        data_relationships_filter = (f'/data/relationships/managed_object/data/id={managed_object_id}'
                                     if managed_object_id else '')
        search_filter = ' AND '.join(
            part for part in (data_attribute_filter, data_relationships_filter) if part)
        raw_result = client.list_alerts(page=page, page_size=limit, search_filter=search_filter)
    data = raw_result.get('data')
    data = data if isinstance(data, list) else [data]
    hr = [build_human_readable(data=alert) for alert in data]
    outputs = [build_output(data=alert, extend_data=extend_data) for alert in data]
    return CommandResults(outputs_prefix='NASightline.Alert',
                          outputs_key_field='id',
                          outputs=outputs,
                          readable_output=tableToMarkdown('Alerts', hr),
                          raw_response=raw_result)
def alert_annotation_list_command(client: NetscoutClient, args: dict):
    """List all annotations attached to the alert given in args['alert_id']."""
    alert_id = args.get('alert_id', '')
    extend_data = argToBoolean(args.get('extend_data', False))
    raw_result = client.get_annotations(alert_id)
    data = raw_result.get('data', [])
    hr = [build_human_readable(data=annotation) for annotation in data]
    annotations = [build_output(data=annotation, extend_data=extend_data) for annotation in data]
    # Annotations are grouped under their alert id in the context.
    context = {'AlertID': alert_id, 'Annotations': annotations}
    return CommandResults(outputs_prefix='NASightline.AlertAnnotation',
                          outputs_key_field='AlertID',
                          outputs=context,
                          readable_output=tableToMarkdown(f'Alert {alert_id} annotations', hr),
                          raw_response=raw_result)
def mitigation_list_command(client: NetscoutClient, args: dict):
    """List mitigations, or a single mitigation when mitigation_id is given."""
    page = arg_to_number(args.get('page'))
    limit = arg_to_number(args.get('limit'))
    mitigation_id = args.get('mitigation_id', '')
    extend_data = argToBoolean(args.get('extend_data', False))
    raw_result = client.list_mitigations(mitigation_id, page=page, page_size=limit)
    data = raw_result.get('data')
    # A single-mitigation response returns a dict; normalize to a list.
    data = data if isinstance(data, list) else [data]
    hr = [build_human_readable(data=mitigation) for mitigation in data]
    mitigations = [build_output(data=mitigation, keys_to_remove=['relationships', 'subobject'], extend_data=extend_data)
                   for mitigation in data]
    return CommandResults(outputs_prefix='NASightline.Mitigation',
                          outputs_key_field='id',
                          outputs=mitigations,
                          readable_output=tableToMarkdown('Mitigation list', hr),
                          raw_response=raw_result)
def mitigation_create_command(client: NetscoutClient, args: dict):
    """Create a new mitigation from the given attributes and relationships.

    Raises:
        DemistoException: if ip_version is not one of IP_DICTIONARY's keys.
    """
    ip_version = IP_DICTIONARY.get(args['ip_version'])
    if not ip_version:
        raise DemistoException('ip_version value can be one of the following: '
                               f'{",".join(list(IP_DICTIONARY.keys()))}. {args.get("ip_version")} was given.')
    description = args.get('description')
    name = args.get('name')
    ongoing = args.get('ongoing', 'false')
    sub_type = args.get('sub_type')
    # sub_object is a raw JSON string argument; parse (and validate) it.
    sub_object = validate_json_arg(args['sub_object'], 'sub_object')
    alert_id = args.get('alert_id')
    managed_object_id = args.get('managed_object_id')
    mitigation_template_id = args.get('mitigation_template_id')
    router_ids = argToList(args.get('router_ids'))
    tms_group_id = args.get('tms_group_id')
    extend_data = argToBoolean(args.get('extend_data', False))
    relationships = client.build_relationships(alert=alert_id, managed_object=managed_object_id,
                                               mitigation_template=mitigation_template_id, routers=router_ids,
                                               tms_group=tms_group_id)
    attributes = assign_params(description=description, ip_version=ip_version, name=name, ongoing=ongoing,
                               subtype=sub_type, subobject=sub_object)
    object_data = {'relationships': relationships, 'attributes': attributes}
    raw_result = client.create_mitigation(data={'data': object_data})
    data = raw_result.get('data', {})
    hr = build_human_readable(data=data)
    mitigation = build_output(data=data, extend_data=extend_data)
    return CommandResults(outputs_prefix='NASightline.Mitigation',
                          outputs_key_field='id',
                          outputs=mitigation,
                          readable_output=tableToMarkdown('Mitigation was created', hr),
                          raw_response=raw_result)
def mitigation_delete_command(client: NetscoutClient, args: Dict[str, str]):
    """Delete the mitigation given in args['mitigation_id']."""
    mitigation_id = args.get('mitigation_id', '')
    client.delete_mitigation(mitigation_id)
    hr = f'### Mitigation {mitigation_id} was deleted'
    return CommandResults(readable_output=hr)
def mitigation_template_list_command(client: NetscoutClient, args: dict):
    """List all mitigation templates."""
    extend_data = argToBoolean(args.get('extend_data', False))
    raw_result = client.mitigation_template_list()
    data = raw_result.get('data')
    data = data if isinstance(data, list) else [data]
    hr = [build_human_readable(data=mitigation_template) for mitigation_template in data]
    mitigation_templates = [
        build_output(data=mitigation_template, extend_data=extend_data, keys_to_remove=['relationships', 'subobject'])
        for mitigation_template in data]
    return CommandResults(outputs_prefix='NASightline.MitigationTemplate',
                          outputs_key_field='id',
                          outputs=mitigation_templates,
                          readable_output=tableToMarkdown('Mitigation template list', hr, removeNull=True),
                          raw_response=raw_result)
def router_list_command(client: NetscoutClient, args: dict):
    """List all routers known to Sightline."""
    extend_data = argToBoolean(args.get('extend_data', False))
    raw_result = client.router_list()
    data = raw_result.get('data')
    data = data if isinstance(data, list) else [data]
    hr = [build_human_readable(router) for router in data]
    routers = [build_output(data=router, extend_data=extend_data) for router in data]
    return CommandResults(outputs_prefix='NASightline.Router',
                          outputs_key_field='id',
                          outputs=routers,
                          readable_output=tableToMarkdown('Router list', hr, headers=ROUTERS_HR_HEADERS,
                                                          removeNull=True),
                          raw_response=raw_result)
def managed_object_list_command(client: NetscoutClient, args: dict):
    """List managed objects (paginated)."""
    page = arg_to_number(args.get('page'))
    limit = arg_to_number(args.get('limit'))
    extend_data = argToBoolean(args.get('extend_data', False))
    raw_result = client.managed_object_list(page=page, page_size=limit)
    data = raw_result.get('data')
    data = data if isinstance(data, list) else [data]
    objects = [build_output(data=managed_object, extend_data=extend_data) for managed_object in data]
    hr = [build_human_readable(data=managed_object) for managed_object in data]
    return CommandResults(outputs_prefix='NASightline.ManagedObject',
                          outputs_key_field='id',
                          outputs=objects,
                          readable_output=tableToMarkdown('Managed object list', hr,
                                                          headers=MANAGED_OBJECTS_HR_HEADERS, removeNull=True),
                          raw_response=raw_result)
def tms_group_list_command(client: NetscoutClient, args: dict):
    """List all TMS groups."""
    extend_data = argToBoolean(args.get('extend_data', False))
    raw_result = client.tms_group_list()
    data = raw_result.get('data')
    data = data if isinstance(data, list) else [data]
    hr = [build_human_readable(data=tms_group) for tms_group in data]
    groups = [build_output(data=group, extend_data=extend_data) for group in data]
    return CommandResults(outputs_prefix='NASightline.TMSGroup',
                          outputs_key_field='id',
                          outputs=groups,
                          readable_output=tableToMarkdown('TMS group list', hr, removeNull=True),
                          raw_response=raw_result)
''' MAIN FUNCTION '''
def main() -> None:
try:
command = demisto.command()
params = demisto.params()
if not params.get('User') or not (api_token := params.get('User', {}).get('password')):
raise DemistoException('Missing API Key. Fill in a valid key in the integration configuration.')
base_url = urljoin(params['url'], 'api/sp')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
first_fetch = None
if first_fetch_dt := arg_to_datetime(params.get('first_fetch', '3 days')):
first_fetch = first_fetch_dt.isoformat()
max_fetch = min(arg_to_number(params.get('max_fetch', 50)), 100)
alert_class = argToList(params.get('alert_class'))
alert_type = argToList(params.get('alert_type'))
if alert_class and alert_type:
raise DemistoException(
'Cannot filter alerts with both \'Alert Class\' and \'Alert Type\' configured. Either choose '
'the entire class you want to fetch or the specific types from within that class.')
classification = params.get('classification')
importance = cast_importance_to_minimal(params.get('importance'))
ongoing = ONGOING_DICTIONARY.get(params.get('ongoing'))
demisto.debug(f'Command being called is {demisto.command()}')
headers: Dict = {
'X-Arbux-APIToken': api_token
}
client = NetscoutClient(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy,
first_fetch=first_fetch,
max_fetch=max_fetch,
alert_class=alert_class,
alert_type=alert_type,
classification=classification,
importance=importance,
ongoing=ongoing
)
args: dict = demisto.args()
result = ''
if command == 'test-module':
result = test_module(client)
elif command == 'fetch-incidents':
fetch_incidents_command(client)
elif command == 'na-sightline-alert-list':
result = list_alerts_command(client, args)
elif command == 'na-sightline-alert-annotation-list':
result = alert_annotation_list_command(client, args)
elif command == 'na-sightline-mitigation-list':
result = mitigation_list_command(client, args)
elif command == 'na-sightline-mitigation-create':
result = mitigation_create_command(client, args)
elif command == 'na-sightline-mitigation-delete':
result = mitigation_delete_command(client, args)
elif command == 'na-sightline-mitigation-template-list':
result = mitigation_template_list_command(client, args)
elif command == 'na-sightline-router-list':
result = router_list_command(client, args)
elif command == 'na-sightline-managed-object-list':
result = managed_object_list_command(client, args)
elif command == 'na-sightline-tms-group-list':
result = tms_group_list_command(client, args)
else:
raise NotImplementedError(f'Command: {command} is not implemented')
if result:
return_results(result)
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| demisto/content | Packs/NetscoutArborSightline/Integrations/NetscoutArborSightline/NetscoutArborSightline.py | Python | mit | 33,338 |
import tensorflow as tf
import tensorflow.contrib as tc
import numpy as np
import time
import os
from model import Generator, Discriminator
import progressbar as pb
import data_utils
class Improved_WGAN(object):
    """Conditional GAN trainer for tag-conditioned image generation.

    Despite the name, the active loss is a DCGAN-style sigmoid cross-entropy
    (the WGAN-GP loss is kept commented out in build_model). Training uses
    four discriminator terms: (real img, real text)=1 and the three wrong
    pairings (fake img / wrong text / wrong img)=0.
    """

    def __init__(self, data, vocab_processor, FLAGS):
        """Set up the TF session, data handles and output directories."""
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        self.data = data
        self.vocab_processor = vocab_processor
        self.vocab_size = len(vocab_processor._reverse_mapping)
        self.FLAGS = FLAGS
        self.img_row = self.data.img_feat.shape[1]
        self.img_col = self.data.img_feat.shape[2]
        self.alpha = 10.  # gradient-penalty weight (only used by the commented WGAN-GP loss)
        self.d_epoch = 1  # discriminator updates per generator update
        self.gen_path()

    def gen_path(self):
        """Create a timestamped output/checkpoint directory for this run."""
        # Output directory for models and summaries
        timestamp = str(time.strftime('%b-%d-%Y-%H-%M-%S'))
        self.out_dir = os.path.abspath(os.path.join(os.path.curdir, "models", timestamp))
        print("Writing to {}\n".format(self.out_dir))
        # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
        self.checkpoint_dir = os.path.abspath(os.path.join(self.out_dir, "checkpoints"))
        self.checkpoint_prefix = os.path.join(self.checkpoint_dir, "model")
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)

    def build_model(self):
        """Build generator/discriminator graphs, losses and optimizers."""
        self.g_net = Generator(
            max_seq_length=self.data.tags_idx.shape[1],
            vocab_size=self.vocab_size,
            embedding_size=self.FLAGS.embedding_dim,
            hidden_size=self.FLAGS.hidden,
            img_row=self.img_row,
            img_col=self.img_col)
        self.d_net = Discriminator(
            max_seq_length=self.data.tags_idx.shape[1],
            vocab_size=self.vocab_size,
            embedding_size=self.FLAGS.embedding_dim,
            hidden_size=self.FLAGS.hidden,
            img_row=self.img_row,
            img_col=self.img_col)

        # Placeholders: correct tag vector / image, noise, and "wrong" pairs.
        self.seq = tf.placeholder(tf.float32, [None, len(self.data.eyes_idx) + len(self.data.hair_idx)], name="seq")
        self.img = tf.placeholder(tf.float32, [None, self.img_row, self.img_col, 3], name="img")
        self.z = tf.placeholder(tf.float32, [None, self.FLAGS.z_dim])
        self.w_seq = tf.placeholder(tf.float32, [None, len(self.data.eyes_idx) + len(self.data.hair_idx)], name="w_seq")
        self.w_img = tf.placeholder(tf.float32, [None, self.img_row, self.img_col, 3], name="w_img")

        r_img, r_seq = self.img, self.seq
        self.f_img = self.g_net(r_seq, self.z)
        # Inference-mode generator, exported under a stable name for serving.
        self.sampler = tf.identity(self.g_net(r_seq, self.z, reuse=True, train=False), name='sampler')

        """
        r img, r text -> 1
        f img, r text -> 0
        r img, w text -> 0
        w img, r text -> 0
        """
        self.d = self.d_net(r_seq, r_img, reuse=False)    # r img, r text
        self.d_1 = self.d_net(r_seq, self.f_img)          # f img, r text
        self.d_2 = self.d_net(self.w_seq, self.img)       # r img, w text
        self.d_3 = self.d_net(r_seq, self.w_img)          # w img, r text

        # WGAN-GP loss kept for reference (currently disabled):
        # epsilon = tf.random_uniform([], 0.0, 1.0)
        # img_hat = epsilon * r_img + (1 - epsilon) * self.f_img
        # d_hat = self.d_net(r_seq, img_hat)
        # ddx = tf.gradients(d_hat, img_hat)[0]
        # ddx = tf.reshape(ddx, [-1, self.img_row * self.img_col * 3])
        # ddx = tf.sqrt(tf.reduce_sum(tf.square(ddx), axis=1))
        # ddx = tf.reduce_mean(tf.square(ddx - 1.0) * self.alpha)
        # self.g_loss = -tf.reduce_mean(self.d_1)
        # self.d_loss = tf.reduce_mean(self.d) - (tf.reduce_mean(self.d_1)+tf.reduce_mean(self.d_2)+tf.reduce_mean(self.d_3))/3.
        # self.d_loss = -(self.d_loss - ddx)

        # DCGAN sigmoid cross-entropy losses.
        self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.d_1, labels=tf.ones_like(self.d_1)))
        self.d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.d, labels=tf.ones_like(self.d))) \
            + (tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.d_1, labels=tf.zeros_like(self.d_1)))
               + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.d_2, labels=tf.zeros_like(self.d_2)))
               + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.d_3, labels=tf.zeros_like(self.d_3)))) / 3

        self.global_step = tf.Variable(0, name='g_global_step', trainable=False)
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            self.d_updates = tf.train.AdamOptimizer(self.FLAGS.lr, beta1=0.5, beta2=0.9).minimize(self.d_loss, var_list=self.d_net.vars)
            self.g_updates = tf.train.AdamOptimizer(self.FLAGS.lr, beta1=0.5, beta2=0.9).minimize(self.g_loss, var_list=self.g_net.vars, global_step=self.global_step)
        self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver(tf.global_variables())

    def train(self):
        """Run the training loop: d_epoch discriminator steps, then one generator step."""
        # NOTE(review): batch_num is computed but never used below — confirm
        # whether FLAGS.iter was meant to iterate over batches.
        batch_num = self.data.length // self.FLAGS.batch_size if self.data.length % self.FLAGS.batch_size == 0 else self.data.length // self.FLAGS.batch_size + 1
        print("Start training WGAN...\n")
        for t in range(self.FLAGS.iter):
            d_cost = 0
            # BUG FIX: was `g_coat = 0` — a typo that left a dead variable;
            # g_cost is the value actually reported below.
            g_cost = 0
            for d_ep in range(self.d_epoch):
                img, tags, _, w_img, w_tags = self.data.next_data_batch(self.FLAGS.batch_size)
                z = self.data.next_noise_batch(len(tags), self.FLAGS.z_dim)
                feed_dict = {
                    self.seq: tags,
                    self.img: img,
                    self.z: z,
                    self.w_seq: w_tags,
                    self.w_img: w_img
                }
                _, loss = self.sess.run([self.d_updates, self.d_loss], feed_dict=feed_dict)
                d_cost += loss / self.d_epoch

            z = self.data.next_noise_batch(len(tags), self.FLAGS.z_dim)
            feed_dict = {
                self.img: img,
                self.w_seq: w_tags,
                self.w_img: w_img,
                self.seq: tags,
                self.z: z
            }
            _, loss, step = self.sess.run([self.g_updates, self.g_loss, self.global_step], feed_dict=feed_dict)
            current_step = tf.train.global_step(self.sess, self.global_step)
            g_cost = loss

            if current_step % self.FLAGS.display_every == 0:
                print("Epoch {}, Current_step {}".format(self.data.epoch, current_step))
                print("Discriminator loss :{}".format(d_cost))
                print("Generator loss :{}".format(g_cost))
                print("---------------------------------")
            if current_step % self.FLAGS.checkpoint_every == 0:
                path = self.saver.save(self.sess, self.checkpoint_prefix, global_step=current_step)
                print("\nSaved model checkpoint to {}\n".format(path))
            if current_step % self.FLAGS.dump_every == 0:
                self.eval(current_step)
                print("Dump test image")

    def eval(self, iters):
        """Generate images for the fixed test tags/noise and dump them to disk."""
        z = self.data.fixed_z
        feed_dict = {
            self.seq: self.data.test_tags_idx,
            self.z: z
        }
        f_imgs = self.sess.run(self.sampler, feed_dict=feed_dict)
        data_utils.dump_img(self.FLAGS.img_dir, f_imgs, iters)
| m516825/Conditional-GAN | improved_WGAN.py | Python | mit | 6,459 |
import os
# Directory holding credential files; overridable through the
# TREEHERDER_CONFIG environment variable, falling back to the local
# 'credentials.ignore' directory.
TREEHERDER_CONFIG = os.environ.get('TREEHERDER_CONFIG') or 'credentials.ignore'
# Submission config for reporting media-playback test results to the
# Treeherder *staging* instance (treeherder.allizom.org).
# NOTE(review): presumably consumed by a mozharness-style submission
# script -- confirm against the caller.
config = {
    "find_links": [
        "http://pypi.pub.build.mozilla.org/pub",
    ],
    "pip_index": False,
    "treeherder_url": "https://treeherder.allizom.org",
    # Paths are relative to 'base_work_dir'
    "treeherder_credentials_path": os.path.join(TREEHERDER_CONFIG, "treeherder-staging-credentials.json"),
    "s3_credentials_path": os.path.join(TREEHERDER_CONFIG, "s3-credentials.json"),
    # Group/job labels shown in the Treeherder UI
    "group_name": "VideoPuppeteer",
    "group_symbol": "VP",
    "job_name": "MSE Video Playback",
    "job_symbol": "m",
    # See https://github.com/mozilla/treeherder/blob/master/treeherder/model/sample_data/job_data.json.sample
    "job_description": "firefox-media-tests (video playback)",
    "job_reason": "scheduled",
    "job_who": "PlatformQuality",
    # For log parsing
    "log_date_format": '%Y-%m-%d %H:%M:%S'
}
| sydvicious/mozplatformqa-jenkins | external-media-tests/config/treeherder_submission.py | Python | mpl-2.0 | 926 |
#!/usr/bin/env python
import gzip
import os
import os.path
def main():
    """Summarize apt history logs into installed/removed package lists.

    Reads /var/log/apt/history.log and its rotated copies
    (history.log.1.gz, history.log.2.gz, ...) oldest-first and replays
    every 'Commandline:' entry, e.g.::

        Commandline: apt-get install gnote
        Commandline: apt-get remove footnote

    to compute which packages were explicitly installed and which were
    removed.
    """
    apt_log_path = '/var/log/apt'
    # Map rotation number -> filename; the plain 'history.log' (newest) is 0.
    history_files = {}
    for filename in os.listdir(apt_log_path):
        if not filename.startswith('history.log'):
            continue
        # 'history.log.<n>.gz' -> '<n>'; plain 'history.log' -> ''
        suffix = filename[len('history.log') + 1:-3]
        history_files[int(suffix) if suffix else 0] = filename
    installed = []
    removed = []
    # Replay logs oldest-first: higher rotation numbers are older.  Sorting
    # the keys is reliable even when rotation numbers have gaps (the
    # original indexed the dict by position, which relied on dict ordering
    # and contiguous numbering).
    for file_number in sorted(history_files, reverse=True):
        filename = history_files[file_number]
        opener = gzip.open if filename.endswith('gz') else open
        with opener(os.path.join(apt_log_path, filename)) as history_file:
            for line in history_file:
                if not line.startswith('Commandline'):
                    continue
                # e.g. 'Commandline: apt-get install gnote' -> ['install', 'gnote']
                command_words = line.split()[2:]
                if not command_words:
                    continue
                # skip a leading option such as '-y'
                if command_words[0].startswith('-'):
                    del command_words[0]
                if not command_words:
                    continue
                if command_words[0] == 'install':
                    for package in command_words[1:]:
                        if package.startswith('-'):
                            continue
                        if package not in installed:
                            installed.append(package)
                        if package in removed:
                            removed.remove(package)
                elif command_words[0] in ('remove', 'autoremove'):
                    for package in command_words[1:]:
                        if package.startswith('-'):
                            continue
                        if package not in removed:
                            removed.append(package)
                        if package in installed:
                            installed.remove(package)


if __name__ == '__main__':
    main()
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)

try:
    # Reference distribution and a large sample drawn from it.
    ref_dist = Geometric(0.7)
    sample_size = 10000
    obs = ref_dist.getSample(sample_size)
    factory = GeometricFactory()

    # Generic build() API: from a sample, from defaults, from raw parameters.
    fitted = factory.build(obs)
    print("distribution=", repr(ref_dist))
    print("Estimated distribution=", repr(fitted))
    fitted = factory.build()
    print("Default distribution=", fitted)
    fitted = factory.build(ref_dist.getParameter())
    print("Distribution from parameters=", fitted)

    # Typed buildAsGeometric() API: the same three variants.
    fitted_geo = factory.buildAsGeometric(obs)
    print("Geometric =", ref_dist)
    print("Estimated geometric=", fitted_geo)
    fitted_geo = factory.buildAsGeometric()
    print("Default geometric=", fitted_geo)
    fitted_geo = factory.buildAsGeometric(ref_dist.getParameter())
    print("Geometric from parameters=", fitted_geo)
except:
    import sys
    print("t_GeometricFactory_std.py", sys.exc_info()[0], sys.exc_info()[1])
| aurelieladier/openturns | python/test/t_GeometricFactory_std.py | Python | lgpl-3.0 | 1,185 |
# encoding: utf-8
"""
Ow.ly url shortner api implementation
Located at: http://ow.ly/api-docs
Doesnt' need anything from the app
"""
from .base import BaseShortener
from ..exceptions import ShorteningErrorException, ExpandingErrorException
class OwlyShortener(BaseShortener):
    """Ow.ly URL shortener (http://ow.ly/api-docs).

    Requires an ``api_key`` keyword argument; every request passes it as
    the ``apiKey`` query parameter.
    """
    api_url = 'http://ow.ly/api/1.1/url/'

    def __init__(self, **kwargs):
        """Validate and store the mandatory api_key, then defer to base.

        :raises TypeError: when ``api_key`` is missing (or falsy).
        """
        if not kwargs.get('api_key', False):
            raise TypeError('api_key is missing from kwargs')
        self.api_key = kwargs.get('api_key')
        super(OwlyShortener, self).__init__(**kwargs)

    def short(self, url):
        """Shorten *url* via the ow.ly API.

        :returns: the shortened URL string.
        :raises ShorteningErrorException: on HTTP failure or bad JSON.
        """
        shorten_url = '{0}{1}'.format(self.api_url, 'shorten')
        data = {'apiKey': self.api_key, 'longUrl': url}
        response = self._get(shorten_url, params=data)
        if response.ok:
            try:
                data = response.json()
            except ValueError:
                raise ShorteningErrorException('There was an error shortening'
                                               ' this url')
            return data['results']['shortUrl']
        raise ShorteningErrorException('There was an error shortening this '
                                       'url - {0}'.format(response.content))

    def expand(self, url):
        """Expand a short ow.ly *url* back to its long form.

        :returns: the expanded (long) URL string.
        :raises ExpandingErrorException: on HTTP failure or bad JSON.
        """
        expand_url = '{0}{1}'.format(self.api_url, 'expand')
        data = {'apiKey': self.api_key, 'shortUrl': url}
        response = self._get(expand_url, params=data)
        if response.ok:
            try:
                data = response.json()
            except ValueError:
                # Fixed copy-paste bug: these messages previously said
                # "shortening" although this is the expand operation.
                raise ExpandingErrorException('There was an error expanding'
                                              ' this url')
            return data['results']['longUrl']
        raise ExpandingErrorException('There was an error expanding this '
                                      'url - {0}'.format(response.content))
| YarnSeemannsgarn/pyshorteners | pyshorteners/shorteners/owly.py | Python | mit | 1,859 |
from twisted.internet.protocol import ClientFactory
from twisted.internet import reactor
from twisted.protocols.basic import LineReceiver
from twisted.conch.telnet import TelnetProtocol
from twisted.internet import stdio
class StdinProtocol(LineReceiver):
    """Line protocol for local stdin: forwards typed lines to *network*."""
    # Use the platform line separator as the stdin line delimiter.
    from os import linesep as delimiter
    def __init__(self, network):
        # network: the connected network protocol whose transport we write to
        self.network = network
    def write(self, text):
        """Write *text* to the local terminal, preceded by a BEL ('\\007')."""
        self.transport.write('\007' + text)
    def print_prompt(self):
        """Show the interactive '#> ' prompt."""
        self.write('#> ')
    def connectionMade(self):
        # Called by twisted once stdio is wired up; show the first prompt.
        self.print_prompt()
    def lineReceived(self, line):
        """Send a typed line to the remote peer, CRLF-terminated."""
        self.network.transport.write(line)
        self.network.transport.write('\r\n')
        self.print_prompt()
class VikiProtocol(LineReceiver, TelnetProtocol):
    """Network side: echoes server lines to the terminal via StdinProtocol."""
    def connectionMade(self):
        # Wire local stdin/stdout to this freshly established connection.
        self.stdin = StdinProtocol(self)
        stdio.StandardIO(self.stdin)
    def lineReceived(self, line):
        # '\033[2;K' appears intended as an ANSI erase-line sequence
        # (the standard form is '\033[2K') -- NOTE(review): confirm.
        self.stdin.write('\033[2;K' + line + '\r\n\r\n')
        self.stdin.print_prompt()
class VikiFactory(ClientFactory):
    """Builds VikiProtocol instances for outgoing client connections."""
    def buildProtocol(self, addr):
        proto = VikiProtocol()
        proto.factory = self
        return proto
def main():
    """Connect to the viki server on localhost:8123 and run the reactor."""
    reactor.connectTCP("localhost", 8123, VikiFactory())
    reactor.run()
if __name__ == '__main__':
    main()
| happz/viki | client.py | Python | mit | 1,171 |
# -*- coding: utf-8 -*-
"""scheduler for regular tasks"""
import atexit
import collections
import contextlib
import datetime
import functools
import hashlib
import logging
import os
import time
import threading
import psutil
import callable_ref
# Module-level logger.
LOG = logging.getLogger(__name__)
# Environment handed to every (de)serialized job: ctx (context manager
# entered around DB access), mongo (DB handle), scheduler (owning Scheduler).
Env = collections.namedtuple('Env', ['ctx', 'mongo', 'scheduler'])
class RemoveJob(Exception):
    """Raised by a job to take itself off the schedule permanently."""


class RestartJob(Exception):
    """Raised by a job to request an immediate re-run."""


class DeferJob(Exception):
    """Raised by a job to be retried after a timeout."""


class ReplaceJob(Exception):
    """Raised by a job to swap in a replacement callable.

    The callable to schedule instead is exposed as ``func``.
    """

    def __init__(self, func):
        super(ReplaceJob, self).__init__()
        self.func = func
def task_logging(func):
    """Decorator for tasks logging

    Logs start/finish of the wrapped task.  Scheduler control-flow
    exceptions (RemoveJob & co.) pass through untouched; any other
    exception is reported (to Sentry when available) and converted
    into RestartJob so the scheduler retries the task.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Logs run progress & exception"""
        LOG.info('scheduler [%s] start, args=(%s)', func.__name__,
                 callable_ref.Callable.printable_params(*args, **kwargs))
        try:
            result = func(*args, **kwargs)
            LOG.info('scheduler [%s] finish, res=%s',
                     func.__name__, repr(result))
            return result
        # reraise *Job exceptions
        except (RemoveJob, RestartJob, DeferJob, ReplaceJob):
            raise
        # pylint: disable=broad-except
        except Exception:
            # Imported lazily so the module loads without raven installed.
            # NOTE(review): assumes raven is importable whenever this branch
            # runs -- confirm deployments always ship it.
            import raven.base
            if raven.base.Raven:
                raven.base.Raven.captureException()
            LOG.exception('scheduler [%s] exception', func.__name__)
            raise RestartJob()
    return wrapper
def run_once_direct(job_func):
    """Run job & cancel it then

    Executes *job_func* synchronously, then raises RemoveJob so the
    scheduler drops the job instead of re-scheduling it.
    """
    job_func()
    raise RemoveJob()
def queue_jobs(env, jobs_list):
    """Runs first job from the list and schedules the rest of list

    Pops and executes the first serialized job.  If any jobs remain,
    raises ReplaceJob so the scheduler re-registers this function bound
    to the tail of the queue; otherwise the queue task finishes normally.

    :param env: Env namedtuple (ctx, mongo, scheduler)
    :param jobs_list: list of serialized job callables (mutated in place)
    :raises ReplaceJob: when more jobs remain in the queue
    """
    next_job = jobs_list.pop(0)
    job_func = env.scheduler.serializer.loads(next_job)
    try:
        job_func()
    except (RemoveJob, ReplaceJob):
        assert False, \
            "Raising RemoveJob/ReplaceJob in queued jobs is not allowed"
    # Lazy %-formatting, consistent with the module's other LOG calls.
    LOG.info('scheduler: pop queue job %s', next_job)
    # Save the rest of the queue as replacement for the current task
    if jobs_list:
        raise ReplaceJob(functools.partial(queue_jobs, env, jobs_list))
class JobProxy(object):
    """Job with ability to be serialized

    Wraps a ``schedule.Job`` and hooks its ``do``/``do_once`` methods so
    that, when a callable is attached, the job is serialized and stored
    via the owning Scheduler.  Attribute lookups that this proxy does not
    define are forwarded to the wrapped job through ``__getattr__``.
    """
    # Attributes of the wrapped schedule.Job that are (de)serialized.
    JOB_PROPS = {'interval', 'unit', 'at_time', 'last_run', 'next_run',
                 'period', 'start_day', 'exec_once', '_id'}
    # Converters for dumping non-string-friendly properties.
    JOB_PROPS_DUMPS = {
        'at_time': lambda timestamp: timestamp.isoformat(),
        'period': lambda period: str(int(period.total_seconds())),
    }
    # Inverse converters used when loading the properties back.
    JOB_PROPS_LOADS = {
        'at_time': lambda str_val: datetime.datetime.strptime(
            str_val, '%H:%M:%S').time(),
        'period': lambda str_val: datetime.timedelta(seconds=int(str_val)),
    }
    def __init__(self, job, scheduler):
        self._src_job = job
        self.scheduler = scheduler
        # run ('til return) only once
        self.exec_once = False
        # replace do method to our own
        self.job_do = job.do
        job.do = self.do_wrapped
        setattr(job, 'do_once', self.do_once)
        # _id is computed in do_wrapped(); only pre-set it to None when the
        # wrapped job has none, otherwise __getattr__ forwards the lookup.
        if not getattr(job, '_id', None):
            self._id = None
    def get_id(self):
        """Try to avoid changing _id member which should be const"""
        return self._id
    def do_wrapped(self, job_func, *args, **kwargs):
        """Proxy to original job

        Attaches the callable via the original ``do`` and then persists
        the job; the job id is the md5 of the serialized callable, so
        identical jobs share one id (and one DB record).
        """
        assert self._src_job.job_func is None
        self.job_do(job_func, *args, **kwargs)
        job_func_str = self.scheduler.serializer.dumps(self.job_func)
        self._id = hashlib.md5(job_func_str).hexdigest()
        self.scheduler.serialize_job(self)
        return self
    def do_once(self, job_func, *args, **kwargs):
        """Proxy to original with ability to run ('til return) only once"""
        self.exec_once = True
        return self.do_wrapped(job_func, *args, **kwargs)
    def __getattr__(self, *args):
        """all methods binding"""
        # Fall back to the wrapped schedule.Job for anything not found here.
        return getattr(self._src_job, args[0])
    def __repr__(self):
        """Serializes job's callable"""
        return self.scheduler.serializer.dumps(self.job_func)
    @staticmethod
    def from_dict(job, scheduler, job_dict):
        """Init job fields from db data

        Restores the serialized properties onto *job* (using
        JOB_PROPS_LOADS converters where defined) and re-attaches the
        deserialized callable before wrapping it in a new JobProxy.
        """
        job_func = scheduler.serializer.loads(job_dict['job_func'])
        for prop in job_dict.keys():
            if job_dict[prop] and prop in JobProxy.JOB_PROPS_LOADS:
                loads = JobProxy.JOB_PROPS_LOADS[prop]
                setattr(job, prop, loads(job_dict[prop]))
            else:
                setattr(job, prop, job_dict[prop])
        # schedule lib internal requirement
        job.do(job_func)
        return JobProxy(job, scheduler)
    def to_dict(self):
        """:returns dict with all attributes"""
        props = {'job_func': self.scheduler.serializer.dumps(self.job_func)}
        for prop in JobProxy.JOB_PROPS:
            props[prop] = getattr(self, prop)
            if props[prop] and prop in JobProxy.JOB_PROPS_DUMPS.keys():
                props[prop] = JobProxy.JOB_PROPS_DUMPS[prop](props[prop])
        return props
    def details(self):
        """:returns tuple with time of the next run and job structure"""
        return self.next_run, self.to_dict()
class JobsQueue(object):
    """Collects serialized jobs to be executed one after another."""
    def __init__(self, env):
        self.env = env
        self._serialized_jobs = []
    @property
    def job(self):
        """:returns partial to call for executing jobs consequentially"""
        assert self._serialized_jobs, "Cannot create queue job for empty list"
        return functools.partial(queue_jobs, self.env, self._serialized_jobs)
    def add(self, job_func, *args, **kwargs):
        """Adds job to queue (stored serialized)"""
        bound = functools.partial(job_func, *args, **kwargs)
        serialized = self.env.scheduler.serializer.dumps(bound)
        self._serialized_jobs.append(serialized)
class Scheduler(object):
    """Schedule background tasks

    Jobs are stored serialized in the 'jobs' Mongo collection and picked
    up by a background worker thread.  Several processes may share the
    collection: a job is claimed by writing the worker's pid into 'who',
    and jobs claimed by dead pids are considered free again.
    """
    def __init__(self, ctx, mongo):
        import schedule  # trick to hide external module from pickling attempts
        self._job_factory = schedule.Job
        # scheduler in background
        self._worker_thread = threading.Thread(target=self._worker,
                                               name='scheduler')
        self._worker_thread.daemon = True  # thread dies with main
        self._stop_event = threading.Event()
        atexit.register(self.stop)
        # env to de/serialize jobs
        self._ctx = ctx
        with self._ctx:
            self._coll = mongo.db.get_collection('jobs')
            self._coll.create_index([('when', -1)])
            self._coll.create_index([('who', -1)])
        self.env = Env(ctx=ctx, mongo=mongo, scheduler=self)
        self.serializer = callable_ref.Callable(self.env)
    def start(self):
        """Thread starter"""
        LOG.info('scheduler thread start')
        assert not self._worker_thread.is_alive()
        self._stop_event.clear()
        self._worker_thread.start()
    def stop(self):
        """Awaiting current task_logging finish and shutdown then"""
        LOG.info('scheduler thread stop')
        if self._worker_thread.is_alive():
            self._stop_event.set()
            self._worker_thread.join()
    def every(self, interval=1):
        """Schedule a new periodic job."""
        job = self._job_factory(interval)
        return JobProxy(job, self)
    def do_async(self, job_func, *args, **kwargs):
        """Run job async immediately"""
        self.every().second.do_once(job_func, *args, **kwargs)
    @contextlib.contextmanager
    def create_jobs_queue(self):
        """Creates context manager, executable on finish"""
        yield JobsQueue(self.env)
    def serialize_job(self, job, update=False):
        """Serialize and save job to MongoDB for async run"""
        assert job.get_id()
        if update and job.exec_once:
            # one-shot job already ran -- drop it instead of re-queueing
            return self._remove_job(job)
        timestamp, job_attributes = job.details()
        # '$min' keeps the earliest scheduled time when the same job is
        # queued concurrently; '$set' forces the new time on update
        update_op = '$set' if update else '$min'
        with self._ctx:
            LOG.info('scheduler: queue job %s at %s (%s)', job.get_id(),
                     timestamp, update_op)
            self._coll.update_one(
                {'_id': job.get_id()},
                {
                    update_op: {
                        'when': timestamp,
                    },
                    '$setOnInsert': {
                        'what': job_attributes,
                        'who': None,
                    },
                }, upsert=True)
    def _restart_job(self, job):
        """Detach job from current process"""
        with self._ctx:
            req = self._coll.update_one(
                {'_id': job.get_id()},
                {'$set': {'who': None}}
            )
            assert req.matched_count == 1
    def _defer_job(self, job):
        """Postpone job for some time"""
        with self._ctx:
            req = self._coll.update_one(
                {'_id': job.get_id()},
                {'$set': {'who': None,
                          'when': (datetime.datetime.now() +
                                   datetime.timedelta(minutes=1))}}
            )
            assert req.matched_count == 1
    def _replace_job(self, job, job_func):
        """Replace job_func for task"""
        assert not callable_ref.equals_soft(job.job_func, job_func)
        job.job_func = job_func
        with self._ctx:
            req = self._coll.update_one(
                {'_id': job.get_id()},
                {'$set': {'who': None,
                          'what': job.to_dict()}}
            )
            assert req.matched_count == 1
    def _remove_job(self, job):
        """Remove job from pending queue"""
        with self._ctx:
            req = self._coll.delete_one({
                '_id': job.get_id(),
            })
            assert req.deleted_count == 1
    def _find_next_job(self):
        """Lookup for available jobs to execute"""
        with self._ctx:
            # a job is free when unclaimed (who=None) or claimed by a pid
            # that no longer exists (crashed/exited worker)
            workers = self._coll.distinct('who')
            unused = [pid for pid in workers
                      if not (pid and psutil.pid_exists(pid))]
            job = self._coll.find_one_and_update(
                {
                    'when': {'$lte': datetime.datetime.now()},
                    'who': {'$in': unused},
                },
                {
                    '$set': {'who': os.getpid()}
                },
                sort=[('when', -1)])
            return job
    def _worker(self):
        """Background thread function"""
        while not self._stop_event.is_set():
            job_dict = self._find_next_job()
            if not job_dict:
                # NOTE(review): sleep(0) only yields the GIL, so this loop
                # polls Mongo continuously while idle -- confirm intended
                time.sleep(0)
                continue
            job = JobProxy.from_dict(self._job_factory(1),
                                     self, job_dict['what'])
            LOG.info('scheduler: enter job %s for process #%d (after %s)',
                     job.get_id(), os.getpid(), job_dict['who'])
            self._run_job(job)
            LOG.info('scheduler: leave job %s for process #%d',
                     job.get_id(), os.getpid())
    def _run_job(self, job):
        """Process job results

        Runs the job and maps its control-flow exceptions onto the
        corresponding DB operations (remove/restart/defer/replace);
        a normal return re-serializes the job for its next run.
        """
        try:
            LOG.info('scheduler: start job ' + job.get_id())
            with self.env.ctx:
                job_res = job.run()
            LOG.info('scheduler: finish job ' + job.get_id())
            self.serialize_job(job, update=True)
            return job_res
        except RemoveJob:
            LOG.info('scheduler: remove job ' + job.get_id())
            self._remove_job(job)
        except RestartJob:
            LOG.info('scheduler: restart job ' + job.get_id())
            self._restart_job(job)
        except DeferJob:
            LOG.info('scheduler: defer job ' + job.get_id())
            self._defer_job(job)
        except ReplaceJob as new_job:
            LOG.info('scheduler: replace job ' + job.get_id())
            self._replace_job(job, new_job.func)
        return None
| n8v-guy/slag | scheduler.py | Python | mit | 12,215 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
** (fr) **
Ce bot extrait l'infobox d'un article et effectue certaines modifications
dessus.
Support des modifications mineures et majeures (la page ne sera mise à jour
uniquement lorsqu'au moins une modification majeure aura été effectuée,
et, dans ce cas seulement, les modifications mineures seront prises
en compte).
Les paramètres optionneles suivants sont supportés :
-dry N'autorise aucune modification réelle, mais montre
seulement ce qui aurait été modifié.
-reprise: Permet de reprendre le traitement à une page donnée.
-debug Passe en mode debug.
-async Active la publication asynchrone des pages : leur
publication sera faite durant un laps de temps libre
afin de permettre une meilleure optimisation du temps.
TODO/À faire :
- introduction des arguments avec la commande pywikibot.handleArgs()
✓ fait pour le(s) paramètre(s) suivant(s):
* -reprise:
➙ reste d'autres paramètres à introduire
- gestion automatique de la variable titreModeleOldRe en prenant
les titres des pages de redirection du modèle
(i.e. titreModeleOldRe = u'titre modele|redirection 1|redirection 2')
✓ fait : en phase de test (cf. ƒ2)
- passage sur pywikibot rewrite
✓ fait
- commenter les différents remplacements effectués pour éviter
les erreurs (cf. ƒ4)
✗ pas fait
** (en) **
(NB: Maybe this version is less developped than the French one.
So if you speak French, please read the version above)
This bot extracts the infobox of an article and do some changes on it.
Support of minor and major modifications (the page will be updated
only when at least one major change will have be done, and, in this case
only, minor changes are so taken into account).
The following optional parameters are supported:
-dry Doesn't do any real changes, but only shows what
would have been changed.
-reprise: Allows to restart the treatment from a given page.
-debug Go in the debug mode.
-async Put the pages asynchronously.
"""
#
# (C) Pywikipedia bot team, 2006-2010
# (C) Toto Azéro, 2010, 2011
#
# Distribué sous licence MIT.
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: basic4_refonte.py 1005 $'
import pywikibot
from pywikibot import pagegenerators, catlib
import re
class InfoboxBot:
    def __init__(self, dry, reprise, titreReprise, debug, async):
        """
        Constructor. Parameters:
            * dry - If True, doesn't do any real changes, but only shows
                what would have been changed.
            * reprise - If True, restart the treatment from the page
                titled *titreReprise*.
            * titreReprise - Title of the page to restart from.
            * debug - If True, work only on the sandbox test page.
            * async - If True, pages are put asynchronously.
              NOTE(review): 'async' is a reserved keyword from Python 3.7
              on; this file is Python 2 only (print statements elsewhere).
        """
        # self.generator = generator
        self.dry = dry
        self.reprise = reprise
        self.titreReprise = titreReprise
        self.debug = debug
        self.async = async
        self.site = pywikibot.getSite()
        # Edit summary used when saving pages
        self.summary = u"[[WP:RBOT]]) (Bot: Correction typographique dans l'[[Modèle:Infobox Footballeur|Infobox Footballeur]]"
    def debugMode(self):
        """Print the current run options (dry/reprise/titreReprise)."""
        print u'dry = %r' % self.dry
        print u'reprise = %r' % self.reprise
        print u'titreReprise = %s' % self.titreReprise
def run(self):
#############################################
##### (fr) : Paramètres modifiables
##### (en) : Modifiable parameters
#############################################
exceptions = []
titreModeleOld = u'Infobox Stade'
## Obsolète : cf. ƒ2 plus bas
# ‹!› Ne pas mettre de parenthèses !
#titreModeleOldRe = u'[iI]nfobox Émission de télévision|[iI]nfobox [Tt]élévision|[Ii]nfobox Télévision nouveau|[Ii]nfobox TV émission'
modeleOld = pywikibot.Page(self.site, u'Modèle:%s' % titreModeleOld)
modeleNew = u'\\1Infobox Stade'
checkNamespace = True
checkNumberNamespace = 0
onlySaveIfMajorChange = True
ajoutParametresAbsents = False
alignementSignesEgal = True
useCategories = False
listeTitresCategories = [u"Détection temporaire paramètre float Infobox ville"]
#### Expérimental : ƒ2 ####
"""
Recherche automatique des redirections afin de remplir la
variable titreModeleOldRe avec une expression régulière.
‹!› La variable titreModeleOldRe ne doit pas contenir de
parenthèses codantes, auquel cas les expressions régulières
utilisant la variable seraient faussées.
"""
premiereLettreMinuscule = titreModeleOld[0:1].lower()
premiereLettreMajuscule = titreModeleOld[0:1].upper()
resteTitre = titreModeleOld[1:]
titreModeleOldRe = u'[%s%s]%s' % (premiereLettreMinuscule, premiereLettreMajuscule, resteTitre)
for page in modeleOld.getReferences(redirectsOnly = True):
premiereLettreMinuscule = page.title(asLink = False)[0:1].lower()
premiereLettreMajuscule = page.title(asLink = False)[0:1].upper()
resteTitre = titreModeleOld[1:]
titreModeleOldRe += u'|[%s%s]%s' % (premiereLettreMinuscule, premiereLettreMajuscule, resteTitre)
############################
## Obsolète : Remplacé par la détection de l'argument
## '-reprise:' (voir fonction main()).
# (fr) : Activer ce paramètre permet de reprendre le
# traitement à partir de la page donnée
# (en) : Enable this parameter allows restarting the treatment
# form the page given
#reprise = True
#titreReprise = u"La Folle Route"
##### (fr) : Modifications majeures #####
##### (en) : Major changes #####
## (fr) : Liste de recherches ##
## (en) : Find list ##
listeRechercheElements = {
#1 : u'\n? *\| *surnom *= *.*'
#1 : u'(\n? *\| *département *= [^\n]*\[ *\[ *[hH]autes[ -][pP]yrénées *\] *\](\n.*)*géoloc-département) *= *(\n|\})',
#5 : u'(\n? *\| *département *= [^\n]*\[ *\[ *[hH]auts[ -][dD]e[ -][sS]eine]**\] *\](\n.*)*géoloc-département) *= *(\n|\})',
#6 : u'(\n? *\| *département *= [^\n]*\[ *\[ *[sS]eine[ -][sS]aint[ -][dD]enis *\] *\](\n.*)*géoloc-département) *= *(\n|\})',
#7 : u'(\n? *\| *département *= [^\n]*\[ *\[ *[Vv]al[ -][Dd]e[ -][mM]arne *\] *\](\n.*)*géoloc-département) *= *(\n|\})'
#8 : u'(\n? *\| *département *= [^\n]*\[ *\[ *[sS]eine *\] *\](\n.*)*géoloc-département) *= *(\n|\})',
#9 : u'(\n? *\| *département *= [^\n]*\[ *\[ *[sS]eine *\] *\](\n.*)*géoloc-département) *= *(\n|\})'
#1 : u'(\n? *\| *)site production( *= *.*)',
#2 : u"(\n? *\| *(précédé|suivi) *par *= *)''(.*)''"
#3 : u'(\n? *\| *)carte2( *= *.*)',
#4 : u'(\n? *\| *taille-logo *= *.*) *(px|cm|mm) *'
#1 : re.compile(u'(alt moy *= *[0-9]*) *m *'),
#2 : re.compile(u'(\| *)arrondissement( *= *)(.*)')
#6 : u'(\n *\|.*) *= *\{ *\{ *[Ff][Oo][Rr][Mm][Aa][Tt][Nn][Uu][Mm] *: *([0-9]*) *\} *\}',
#7 : u'(\n? *\| *région) *= *\[ *\[ *Région (.*)',
#8 : u'(\n? *\| *région) *= *\[ *\[.*\| *([bB]retagne|[cC]entre).*\] *\]',
#9 : u'(\n? *\| *région) *= *\[ *\[.*\| *[Rr]éunion.*\] *\]',
#10: u'(.*) *= *[Nn]\.? *[Cc]\.? *',
#1 : u'(\n? *\| *(longitude|latitude)) *= *([0-9]*)[°\'‘’" ]*([0-9]*)[°\'‘’" ]*([0-9]*)[°\'‘’" ]*[Ee](st)?',
#2 : u'(\n? *\| *(longitude|latitude)) *= *([0-9]*)[°\'‘’" ]*([0-9]*)[°\'‘’" ]*([0-9]*)[°\'‘’" ]*([Oo](uest)?|[Ww](est)?)',
#3 : u'(\n? *\| *(longitude|latitude)) *= *([0-9]*)[°\'‘’" ]*([0-9]*)[°\'‘’" ]*([0-9]*)[°\'‘’" ]*[Nn](ord)?',
#4 : u'(\n? *\| *(longitude|latitude)) *= *([0-9]*)[°\'‘’" ]*([0-9]*)[°\'‘’" ]*([0-9]*)[°\'‘’" ]*[Ss](ud)?'
#6 : u'(\n? *\| *(longitude|latitude) *= .*)//',
#1 : u'\n? *\| *(float) *= *.*', ## Paramètres à supprimer
#8 : re.compile(u'(\n?) *\| *Région *= *(.*)'),
#1 : re.compile(ur'\| *coordonnées *=.*(.*\| *latitude *= *[^\n]+.*\| *longitude *= *[^\n]+\n|.*\| *longitude *= *[^\n]+.*\| *latitude *= *[^\n]+\n)', re.DOTALL),
#2 : re.compile(ur'(\| *latitude *= *[^\n]+.*\| *longitude *= *[^\n]+.*|\| *longitude *= *[^\n]+.*\| *latitude *= *[^\n]+.*)\| *coordonnées *=[^\n]*\n?', re.DOTALL),
#3 : u'(\n?) *\| *coordonnées *= *\{ *\{ *[Cc]oord *\| *([0-9\.-]+) *\| *([0-9\.]+) *\| *([0-9\.]+) *\| *([NS]) *\| *([0-9\.-]+) *\| *([0-9\.]+) *\| *([0-9\.]+) *\| *([EW])[^.\}]*\} *\}',
#4 : u'(\n?) *\| *coordonnées *= *\{ *\{ *[Cc]oord *\| *([0-9\.-]+) *\| *([NS]) *\| *([0-9\.-]+) *\| *([EW])[^.\}]*\} *\}',
#5 : u'(\n?) *\| *coordonnées *= *\{ *\{ *[Cc]oord *\| *([0-9\.-]+) *\| *([0-9\.-]+)[^.\}]*\} *\}',
#6 : u'(\n?) *\| *coordonnées *= *[^0-9\n]*'
#5 : re.compile(ur'\{ *\{ *[Cc]oord[\n]*\} *\}(.*\| *latitude *= *[^\n]+.*\| *longitude *= *[^\n]+.*|.*\| *longitude *= *[^\n]+.*\| *latitude *= *[^\n]+)', re.DOTALL)
#13 : u'(\n? *\| *(longitude|latitude)) *= *([0-9\.]*) *[Ee](st)?'
#1 : u"((\n? *\| *)image *=.*) ((\$®\$|\|)[0-9]+px *)",
#2 : u"(\n? *\| *)image *= *\[ *\[ *([iI]mage|[Ff]ichier|[Ff]ile) *: *([^\|]+) *(((\||\$®\$)[^\|]*)?) *\] *\]"
1 : u"→"
}
## (fr) : Liste de remplacements ##
## (en) : Replace list ##
listeRemplacementsElements = {
1 : u" {{info|→|prêt}}"
#2 : u"\\1image = \\3\\1légende = \\4"
#2 : u"\\1\\3"
#1 : u'\\1 = Hautes-Pyrénées\\3',
#3 : u'\\1 | latitude = \\2/\\3/\\4/\\5\n | longitude = \\6/\\7/\\8/\\9',
#4 : u'\\1 | latitude = \\2/\\3\n | longitude = \\4/\\5',
#5 : u'\\1 | latitude = \\2\n | longitude = \\3',
#6 : u'\\1 | latitude = \n | longitude = '
#1 : u'|nom de division = [[Départements d\'Haïti|Département]]\n|division =\\3',
#2 : u'|nom de division2 = Arrondissement\n|division2 = \\3'
#7 : u'\\1 = [[\\2',
#8 : u'\\1 = [[Région \\2|\\2]]',
#9 : u'\\1 = [[La Réunion|Réunion]]',
#10 : u'\\1 = ',
#1 : u'\\1 = \\3/\\4/\\5/E',
#2 : u'\\1 = \\3/\\4/\\5/W',
#3 : u'\\1 = \\3/\\4/\\5/N',
#4 : u'\\1 = \\3/\\4/\\5/S'
}
## (fr) : Liste d'ajouts ##
## (en) : Adds list ##
##### (fr) : Modifications mineures #####
##### (en) : Minor changes #####
listeConditionsPositivesAjouts = {
#1 : u""
}
listeConditionsNegativesAjouts = {
#1 : u"\| *carte *= *[EÉée]tats-Unis/[fF]loride"
}
listeElementsAAjouter = {
#1 : u"| carte=États-Unis/Floride"
}
## (fr) : Liste de recherches ##
## (en) : Find list ##
listeRechercheElementsMineure = {
#1 : u'(\| *image *=.*\n) *\| *([^=]*\n)',
#2 : u'(\n? *\| *(longueur|largeur) *= *[0-9]*),'
}
## (fr) : Liste de remplacements ##
## (en) : Replace list ##
listeRemplacementsElementsMineure = {
#1 : u'\\1 | légende = \\2',
#2 : u'\\1.'
}
#############################################
#### (fr) : Début du traitement
#### (en) : Beginning of the treatment
#############################################
if not self.debug:
if not useCategories:
listePages = [page for page in modeleOld.getReferences(follow_redirects=False, withTemplateInclusion=True,
onlyTemplateInclusion=True, redirectsOnly=False)]
else:
listePages = []
for titreCategorie in listeTitresCategories:
cat = pywikibot.Category(self.site, titreCategorie)
listePages.extend(list(cat.articles()))
pywikibot.output(u'Taille de la liste = %i' % len(listePages))
listePagesARetirer = []
if self.reprise:
for page in listePages:
if page.title() == self.titreReprise:
break
listePagesARetirer.append(page)
if checkNamespace:
for page in listePages:
if page.namespace() != checkNumberNamespace and not page in listePagesARetirer:
listePagesARetirer.append(page)
for page in listePagesARetirer:
listePages.remove(page)
elif self.debug:
listePages = [pywikibot.Page(self.site, u'User:Toto Azéro/Bac à sable')]
pywikibot.output(u"Nombre de pages à traiter : %i" % len(listePages))
for page in listePages:
if onlySaveIfMajorChange:
possibiliteSauvegarder = False
else:
possibiliteSauvegarder = True
text = self.load(page)
pywikibot.output(u"\n> \03{lightblue}Traitement de %s\03{default} <" % page.title())
############
### À utiliser uniquement dans les cas exceptionnels :
### permet d'autoriser la sauvegarde dans un cas précis
global exception_possibiliteSauvegarder
exception_possibiliteSauvegarder = False
#textOld = text
#text = re.sub(u'(\n? *\| *date-sans *=[^\n]*) *\} *\} *(([\n]+.*)*géoloc-département *= *)', u'\\1\\2}}', text)
#if text != textOld:
# exception_possibiliteSauvegarder = True
#############
#################################################################
############ TRAVAIL SUR L'INFOBOX EXTRAITE DU TEXTE ############
#################################################################
##### Délimiter le début de l'infobox
try:
matchDebut = re.search(u'\{ *\{ *(%s)' % titreModeleOldRe, text).group(0)
except:
pywikibot.output(u"\03{lightred}Absence du modèle %s sur la page %s\03{default}" % (titreModeleOld, page.title()))
continue
positionMatchDebut = text.index(matchDebut)
extraitText = text[positionMatchDebut:]
#### Délimiter la fin possible de l'infobox
#### (i.e. les premiers u'}}' trouvés)
# ‹!› Le résultat n'est pas forcément la fin réelle
# de l'infobox : en effet, cet ordre ne tient pas
# compte de probables modèles présents dans l'infobox
# et s'arrêtera aux premiers u'}}' trouvés, quels
# qu'ils soient !
matchFin = re.search(u' *\} *\}', extraitText).group(0)
positionMatchFin = extraitText.index(matchFin)
extraitText = extraitText[0:(positionMatchFin + len(matchFin))]
# NB : dans extraitText[0:(positionMatchFin + len(matchFin))],
# on ajoute la longueur des u'}}' pour qu'ils se
# trouvent bien dans l'extrait traité
# Principe de la boucle : tant que le nombre de u'{'
# et celui de u'}' ne sont pas équibilibrés, la variable
# extraitText est agrandie jusqu'au u'}}' suivants trouvés
# dans le texte.
positionMatchFin = text.index(extraitText)
resteText = text[positionMatchFin + len(extraitText):]
while extraitText.count('{') != extraitText.count('}'):
matchFin = re.search(u'([^\{\}]* *\} *\}|(\{ *\{[^\{\}]*\} *\})+[^\{\}]* *\} *\})', resteText).group(0)
positionMatchFin = resteText.index(matchFin)
extraitTextOld = extraitText
extraitText = extraitText + resteText[0:(positionMatchFin + len(matchFin))]
resteText = resteText[positionMatchFin + len(matchFin):]
### On travaille sur cet extrait puis on effectuera
### les remplacements sur le texte à la fin
extraitTextNew = extraitText
##### Normalisation de l'infobox pour éviter des problèmes dans son traitement #####
## Enlever les u'|' inutiles et les placer au début du paramètre suivant ##
# ex : |cp=66360| \n maire=Gérard Rabat
# → |cp=66360\n | maire=Gérard Rabat
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'(\|[^\[\]\{\}]*=.*)\|\n'), u'\\1\n |', exceptions)
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'\|? *([^\[\]\{\}]*=.*)\| *\n *\|'), u'| \\1\n |', exceptions)
####
### Ce qui suit est spécifique
### à l'infobox {{Infobox Commune de France}}
## Déplacement d'une image mise à côté du nom de la commune (le blason) dans le paramètre 'armoiries' ##
# ex : |nomcommune = Saint-Didier-en-Velay [[Image:Blason_Saint-Didier-en-Velay_43.svg|80 px]]
# → |nomcommune = Saint-Didier-en-Velay […] |armoiries = Blason_Saint-Didier-en-Velay_43.svg
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'(\| *nomcommune *= *[^\|]*\n?)\[ *\[ *([fF]ichier|[Ii]mage|[Ff]ile) *: *([^\|]*)\|? *([Cc]enter|[Cc]entre|[Ll]eft|[Rr]ight)?[^\]]*\] *\]'), u'\\1|armoiries = \\3', exceptions)
## On fait passer les u'}}' à la ligne s'ils sont en bout de ligne d'un paramètre ##
# ex : u'| géoloc-département = | }}'
# → u'| géoloc-département = \n}}' #
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'(\n *\|? *.*=.*) *\|+ *\} *\}$'), u'\\1\n}}', exceptions)
# ex : u'| géoloc-département = }}'
# → u'| géoloc-département = \n}}' #
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'(\n *\|? *.*=.*) *\} *\}$'), u'\\1\n}}', exceptions)
####
## On fait passer un éventuel u'|' présent en bout de ligne du début de l'infobox ##
# ex : u'{{Infobox Commune de France|\n\|'
# → u'{{Infobox Commune de France\n|'
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'\{ *\{ *(%s[^\n]*) *\| *\n+ *\|' % titreModeleOldRe), u'{{\\1\n | ', exceptions)
# ex : u'{{Infobox Commune de France|'
# → u'{{Infobox Commune de France\n|'
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'\{ *\{ *(%s[^\n]*) *\| *\n' % titreModeleOldRe), u'{{\\1\n | ', exceptions)
## Suppression de plusieurs u'|' successifs ##
# ex : {{Infobox Commune de France||nomcommune=Baudrecourt
# → {{Infobox Commune de France|nomcommune=Baudrecourt
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'(\{ *\{ *(%s.*)) *(\| *){2,}' % titreModeleOldRe), u'\\1|', exceptions)
#print u'0-5\n' + extraitTextNew
## Faire passer les u'|' présents en fin de ligne en début de ligne suivante ##
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'(\n?[^\[\]\{\}\n]*=[^=\n]*) *\| *\n'), u'\\1\n | ', exceptions)
#extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'(\n?[^=]*=[^=]*) *\| *\n'), u'\\1\n | ', exceptions)
#print u'1\n' + extraitTextNew
extraitTextNewOld = extraitTextNew
###################################################
###### Modifications pour éviter des erreurs ######
###################################################
## TODO/À faire (ƒ4) : commenter les différents remplacements
re1 = re.compile(u'(\| *[^\[\]\{\}]*= *\[ *\[ *([fF]ichier|[Ff]ile|[Ii]mage) *:[^=\]]*\|[^=\]]*)=')
re2 = re.compile(u'(\| *[^\[\]\{\}]*= *[^\[\]\{\}]*\[ *\[[^\[\]\{\}]*)=([^\[\]\{\}]*\] *\])')
re3 = re.compile(u'(\| *[^\[\]\{\}]*= *[^\[\]\{\}]*\{ *\{[^\[\]\{\}]*)=([^\[\]\{\}]*\} *\})')
re4 = re.compile(u'(\| *[^\[\]\{\}]*=.*\{ *\{.*)\|(.*\} *\})')
re5 = re.compile(u'\|([^\[\]\{\}\|\n]*=) *')
#re5 = re.compile(u'(\| *[^\[\]\{\}]*=.*\{ *\{[^\}\|]*(\n+[^\}]*)+)\|([^\}]*(\n*[^\}]*)+\} *\})')
while re.search(re1, extraitTextNew):
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re1, u'\\1$±$', exceptions)
while re.search(re2, extraitTextNew):
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re2, u'\\1$±$\\2', exceptions)
while re.search(re3, extraitTextNew):
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re3, u'\\1$±$\\2', exceptions)
while re.search(re4, extraitTextNew):
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re4, u'\\1$®$\\2', exceptions)
while re.search(re5, extraitTextNew):
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re5, u'$¡$\\1 ', exceptions)
extraitTextNew = extraitTextNew.replace(u'|', u'$®$')
extraitTextNew = extraitTextNew.replace(u'$¡$', u'|')
###################################################
#print u'2\n' + extraitTextNew
#pywikibot.showDiff(extraitTextNewOld, extraitTextNew)
## Séparer tous les paramètres présents sur une même ligne ##
# ex : u'|nomcommune = Saint-Félix-de-Sorgues|région = [[Midi-Pyrénées]]'
# → u'|nomcommune = Saint-Félix-de-Sorgues
# |région = [[Midi-Pyrénées]]'
verificationStop = re.compile(u'(\| *[^\[\]\{\}]*=[^\n]*)(\| *[^\[\]\{\}]*=[^\n]*)')
#print extraitTextNew
while re.search(verificationStop, extraitTextNew):
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'(\| *[^\[\]\{\}]*=[^\n]*)(\| *[^\[\]\{\}]*=[^\n]*)'), u'\\1\n\\2', exceptions)
#print u'3\n' + extraitTextNew
verificationStop = re.compile(u'(\| *[^\[\{]+=.*\[ *\[(.*\|)+.*\] *\][^\[\n]*)(\| *[^\[\{]+=.*)')
while re.search(verificationStop, extraitTextNew):
##PROBLEME ICI (RESOlU ?)
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'(\| *[^\[\{]+=.*\[ *\[(.*\|)+.*\] *\][^\[\n]*)(\| *[^\[\{]+=.*)'), u'\\1\n\\2', exceptions)
#print u'4\n' + extraitTextNew
#print extraitTextNew
## Fait passer les parmètre en bout d'annonce du modèle à la ligne
# ex : {{Infobox Commune de France|nomcommune=Baudrecourt
# → {{Infobox Commune de France
# | nomcommune=Baudrecourt
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'\{ *\{ *(%s) *\| *(.*)\n' % titreModeleOldRe), u'{{\\1\n | \\2\n', exceptions)
#print extraitTextNew
## Suppression d'un '|' inutile (ex : '| }}')
# ex : | }}
# → }}
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'\n *\|+ *\} *\}'), u'\n}}', exceptions)
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'\n *(\|?.*=.*) *\| *\} *\}'), u'\n\\1}}', exceptions)
#print u'5\n' + extraitTextNew
extraitTextNewOld = extraitTextNew
## Modifications majeures ##
for x in listeRechercheElements:
elementAChercher = listeRechercheElements[x]
print 'x = %i' % x
print elementAChercher
print re.search(elementAChercher, extraitTextNew)
print listeRemplacementsElements[x]
print re.sub(elementAChercher, listeRemplacementsElements[x], extraitTextNew)
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, elementAChercher, listeRemplacementsElements[x], exceptions)
#extraitTextNew = re.sub(elementAChercher, listeRemplacementsElements[x], extraitTextNew)
#print extraitTextNew
#print extraitTextNew
## Ajouts majeurs ##
for x in listeElementsAAjouter:
elementAAjouter = listeElementsAAjouter[x]
conditionNegative = listeConditionsNegativesAjouts[x]
conditionPositive = listeConditionsPositivesAjouts[x]
#print elementAAjouter
if not re.search(conditionNegative, extraitTextNew) and re.search(conditionPositive, extraitTextNew):
positionFin = extraitTextNew.rindex('\n}}')
#print positionFin
#print extraitTextNew[0:positionFin]
extraitTextNew = extraitTextNew[0:positionFin] + u"\n" + elementAAjouter + extraitTextNew[positionFin:]
#print u'5-5\n' + extraitTextNew
### Enlever les séparateurs des milliers dans certains paramètres donnés
listeElements = []
for element in listeElements:
m = re.search(u'(%s *= *)([0-9]* [0-9]* *)' % element, extraitTextNew)
if m != None:
new = m.group(1) + m.group(2).replace(u' ', u'')
#print u'1-1 : %s — %s' % (m.group(0), new)
extraitTextNew = extraitTextNew.replace(m.group(0), new)
#print extraitTextNew
## Vérifier si une modification majeure a eu lieu
if (extraitTextNew != extraitTextNewOld and onlySaveIfMajorChange) or exception_possibiliteSauvegarder:
possibiliteSauvegarder = True
#pywikibot.showDiff(extraitTextNewOld, extraitTextNew)
for x in listeRechercheElementsMineure: # Modifications mineures
elementAChercher = listeRechercheElementsMineure[x]
#print 'x = %i' % x
#print elementAChercher
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, elementAChercher, listeRemplacementsElementsMineure[x], exceptions)
#print extraitTextNew
else:
continue
#listeElements = [u'longitude', u'latitude']
#for element in listeElements:
# m = re.search(u'\n? *\| *%s *= *([-—–]?[0-9\.]+)' % element, extraitTextNew)
# m2 = re.search(u'\n? *\| *%s *= *[0-9]+\.[0-9]{4,}' % element, extraitTextNew)
# if m != None and m2 != None:
# extraitTextNew = extraitTextNew.replace(m.group(1), (u'%.4f' % float(m.group(1))))
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'\n *\| *'), u'\n | ', exceptions)
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'(\| *[a-zé²\- _]{2,17}) *= *'), u'\\1 = ', exceptions)
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, re.compile(u'(\|.*=.*)\| *$'), u'\\1', exceptions)
### Fin modification pour éviter des erreurs (1/2)
extraitTextNew = extraitTextNew.replace(u'$±$', u'=')
##### Ajouts de tous les paramètres absents #####
listeParametresActuelsAvecValeurs = extraitTextNew.split(u'\n | ')[1:]
listeParametres = [u'nomcommune', u'image', u'image-desc', u'armoiries',
u'armoiries-desc', u'armoiries-taille', u'logo', u'logo-desc', u'logo-taille', #u'collectivité',
u'région', u'canton', u'arrondissement', u'insee', u'cp', u'maire', u'mandat', u'intercomm',
u'latitude', u'longitude', u'alt mini', u'alt maxi', u'km²', u'sans', u'date-sans',
u'aire-urbaine', u'date-aire-urbaine', u'nomhab', u'siteweb', u'géoloc-département']
if ajoutParametresAbsents:
for parametre in listeParametres:
if not parametre in extraitTextNew:
parametrePrecedent = listeParametres[listeParametres.index(parametre) - 1]
old = re.compile(u'(\n? *\| *%s *= *.*)' % parametrePrecedent)
new = u'\\1\n | %s = ' % parametre
extraitTextNew = pywikibot.replaceExcept(extraitTextNew, old, new, exceptions)
#print '6\n' + extraitTextNew
##### Alignement des signes u'=' #####
listeParametresAvecValeurs = extraitTextNew.split(u'\n | ')[1:]
tailleMaxParametre = 0
for parametreAvecValeur in listeParametresAvecValeurs:
#print parametreAvecValeur
match = re.search(u' *=', parametreAvecValeur).group(0)
positionSigneEgal = parametreAvecValeur.index(match)
partieParametre = parametreAvecValeur[0:positionSigneEgal]
#print 'partieParametre = %s ; taille = %s' % (partieParametre, len(partieParametre))
if len(partieParametre) > tailleMaxParametre:
tailleMaxParametre = len(partieParametre)
tailleMaxParametre = tailleMaxParametre + 1 # Permet de laisser un espace avant le plus long paramètre…
#print '\ntailleMaxParametre = %i' % tailleMaxParametre
if not alignementSignesEgal:
listeParametresAvecValeurs = []
#print listeParametresAvecValeurs
for parametreAvecValeur in listeParametresAvecValeurs:
#print parametreAvecValeur
positionSigneEgal = parametreAvecValeur.index(u'=')
partieParametre = parametreAvecValeur[0:positionSigneEgal]
partieParametreNew = partieParametre
while len(partieParametreNew) < tailleMaxParametre:
partieParametreNew = partieParametreNew + u' '
while len(partieParametreNew) > tailleMaxParametre:
partieParametreNew = partieParametreNew[0:-1]
#print str(len(partieParametreNew)) + partieParametreNew
#print 'partieParametre = ' + partieParametre
parametreAvecValeurNew = pywikibot.replaceExcept(parametreAvecValeur, u'^%s' % partieParametre, partieParametreNew, exceptions)
#parametreAvecValeurNew = parametreAvecValeur.replace(u' | ' + partieParametre, u' | ' + partieParametreNew)
#print 'partieParametreNew = ' + partieParametreNew
extraitTextNew = extraitTextNew.replace(u'\n | ' + parametreAvecValeur, u'\n | ' + parametreAvecValeurNew)
### Fin modification pour éviter des erreurs (2/2)
extraitTextNew = extraitTextNew.replace(u'$®$', u'|')
#print extraitTextNew
###### Mettre à jour le texte grâce à l'extrait modifié et le publier ######
#print extraitTextNew
text = text.replace(extraitText, extraitTextNew)
resume = self.summary
#if re.search(u'longitude *= *[^\n]+.*', text) and re.search(u'latitude *= *[^\n]+.*', text):
# resume = u'[[WP:RBOT]] : Suppression du paramètre \'coordonnées\' dans le [[modèle:Infobox Pont]] au profit des paramètres \'latitude\' et \'longitude\''
#if re.search(u'\{ *\{ *[Cc]oord *\|.*\} *\}', text) and re.search(u'\| *latitude *= *.+', text) and re.search(u'\| *longitude *= *.+', text):
# text = pywikibot.replaceExcept(text, re.compile(u'\{ *\{ *[Cc]oord *\|.*\} *\}\n?'), u'', exceptions)
# resume = resume + u' ; suppression du modèle {{coord}} faisant doublon avec ces paramètres'
#print extraitTextNew
print u"‹!› Modifications ‹!›"
pywikibot.showDiff(page.get(), text)
#if checkNamespace and page.namespace() == checkNumberNamespace:
#print possibiliteSauvegarder
if possibiliteSauvegarder:
if not self.save(text, page, resume):
pywikibot.output(u'La page %s n\'a pas été sauvegardée' % page.title(asLink=True))
#else:
# pywikibot.output(u'\03{lightred}La page %s ne sera pas sauvegardée car elle n\'est pas dans le namespace 0\03{default}' % page.title(asLink=True))
def load(self, page):
    """Return the text content of *page*.

    When the page does not exist or is a redirect, an explanatory
    message is printed and None is returned instead.
    """
    try:
        return page.get()
    except pywikibot.NoPage:
        pywikibot.output(u"La page %s n'existe pas."
                         % page.title(asLink=True))
    except pywikibot.IsRedirectPage:
        pywikibot.output(u"La page %s est une redirection."
                         % page.title(asLink=True))
    return None
def save(self, text, page, comment, minorEdit=True, botflag=True):
# Ne sauvegarder que si quelque chose a été modifié.
if text != page.get():
arretdurgence()
pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
% page.title())
# Diff montrant ce qui a été modifié
pywikibot.showDiff(page.get(), text)
# Commentaire d'édition du bot
pywikibot.output(u"Commentaire d'édition : %s" %comment)
# Pour plus d'info sur le paramètre dry, voir explications
# dans l'en-tête du script
if not self.dry:
#choice = 'y'
choice = pywikibot.inputChoice(u'Do you want to accept these changes?', ['Yes', 'No'], ['y', 'N'], 'N')
if choice == 'y':
try:
if self.async:
page.put_async(text, comment=comment, minorEdit=minorEdit)
else:
page.put(text, comment=comment, minorEdit=minorEdit)#, botflag=botflag)
except pywikibot.EditConflict:
pywikibot.output(
u"Saute la page %s à cause d'un conflit d'édition."
% (page.title(asLink = True)))
except pywikibot.LockedPage:
pywikibot.output(u"La page %s est protégée."
% page.title(asLink = True))
except pywikibot.SpamblacklistError, error:
pywikibot.output(
u'Impossible de publier la page %s à cause du filtre anti-erreur n°%s'
% (page.title(asLink = True), error.url))
else:
return True
return False
def arretdurgence():
    """Emergency-stop check.

    Fetches the bot's talk page; when it contains anything other than
    the bare {{/Stop}} template (i.e. a message was left for the bot),
    the script pauses until the operator acknowledges.
    """
    global arret
    arretpage = pywikibot.Page(pywikibot.Site('fr', 'wikipedia'),
                               u"Discussion utilisateur:ZéroBot")
    arret = arretpage.get()
    if arret != u"{{/Stop}}":
        pywikibot.inputChoice(u"\03{lightred}Demande d'arrêt d'urgence\03{default}", ['vu'], ['v'], '')
def main():
    """Parse the command-line flags and launch the infobox bot."""
    dry = False
    reprise = False
    titreReprise = None
    debug = False
    run_async = False  # local renamed: 'async' is reserved in modern Python
    for arg in pywikibot.handleArgs():
        if arg.startswith("-dry"):
            dry = True
        elif arg.startswith("-reprise:"):
            reprise = True
            titreReprise = arg[len('-reprise:'):].replace('_', ' ')
            if titreReprise == '':
                pywikibot.output(u'Syntax: basic4_refonte.py [-reprise:|-dry|-debug]')
                exit()
        elif arg.startswith("-debug"):
            debug = True
        elif arg.startswith("-async"):
            run_async = True
        else:
            pywikibot.output(u'Syntax: basic4_refonte.py [-reprise:|-dry|-debug|-async]')
            exit()
    bot = InfoboxBot(dry, reprise, titreReprise, debug, run_async)
    bot.debugMode()
    bot.run()
if __name__ == "__main__":
    try:
        main()
    finally:
        # Always release pywikibot resources, even when main() fails.
        pywikibot.stopme()
| Toto-Azero/Wikipedia | pywikibot/basic4_refonte.py | Python | gpl-3.0 | 31,480 |
import win32com.client
import time
class jdi_win32:
    """Helpers for sending keystrokes to the active window via WScript.Shell."""

    @staticmethod
    def paste_text(text, delay=3):
        """Type *text* into the currently focused window, then press Enter.

        text:  the string to send (SendKeys syntax applies).
        delay: seconds to wait before typing, so the target window has
               time to gain focus (default 3, matching the old behaviour).
        """
        shell = win32com.client.Dispatch("WScript.Shell")
        time.sleep(delay)
        shell.Sendkeys(text)
        shell.Sendkeys("~")  # "~" is SendKeys shorthand for the Enter key
from . import actuate
import logging
if __name__ == '__main__':
    # Interactive console for driving the actuation client by hand.
    d_a = 0.0    # aileron deflection
    d_e = 0.0    # elevator deflection
    d_r = 0.0    # rudder deflection
    motor = 0.0  # motor power
    client = actuate.Client(logging)
    while True:
        line = input('> ').split()
        if not line:
            # Blank input used to raise IndexError on line[0]; ignore it.
            continue
        cmd = line[0]
        if cmd == 'exit':
            break
        if cmd == 'get':
            pkt = client.getvals()[0]
            print('{:2.3f}:{:2.3f}:{:2.3f}:{:2.3f}'.format(
                pkt.d_a, pkt.d_e, pkt.d_r, pkt.motor_pwr))
        if len(line) != 2:
            continue
        try:
            arg = float(line[1])
        except ValueError:
            # Non-numeric argument used to crash the loop; report and retry.
            print('invalid value: {}'.format(line[1]))
            continue
        if cmd == 'aileron':
            d_a = arg
        elif cmd == 'rudder':
            d_r = arg
        elif cmd == 'elevator':
            d_e = arg
        elif cmd == 'motor':
            motor = arg
        client.setvals(d_a, d_e, d_r, motor)
| rbmj/pyflightcontrol | pyflightcontrol/aircraft/actuate_test.py | Python | apache-2.0 | 791 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Widen 'UserPageView.url' to a 1024-character nullable CharField."""
    db.alter_column('website_userpageview', 'url',
                    self.gf('django.db.models.fields.CharField')(max_length=1024, null=True))
def backwards(self, orm):
    """Shrink 'UserPageView.url' back to a 210-character nullable CharField."""
    db.alter_column('website_userpageview', 'url',
                    self.gf('django.db.models.fields.CharField')(max_length=210, null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerattachment': {
'Meta': {'object_name': 'AnswerAttachment'},
'answer_reference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerReference']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name_for_url': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_jurisdiction'", 'null': 'True', 'to': "orm['website.Jurisdiction']"}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.jurisdictionrating': {
'Meta': {'object_name': 'JurisdictionRating'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.migrationhistory': {
'Meta': {'object_name': 'MigrationHistory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'source_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'target_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'target_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'display_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_attributes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_suffix': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'has_multivalues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'js': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'migration_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'qtemplate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'state_exclusive': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'support_attachments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'terminology': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'validation_class': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.servervariable': {
'Meta': {'object_name': 'ServerVariable'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.usercommentview': {
'Meta': {'object_name': 'UserCommentView'},
'comments_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'last_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Comment']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'migrated_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userpageview': {
'Meta': {'object_name': 'UserPageView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_page_view_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.view': {
'Meta': {'object_name': 'View'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.vieworgs': {
'Meta': {'object_name': 'ViewOrgs'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
},
'website.viewquestions': {
'Meta': {'object_name': 'ViewQuestions'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import luigi
import luigi.contrib.redshift
import mock
import unittest
# Fake AWS and S3 credentials taken from `../redshift_test.py`.
# These dummy values are never sent to AWS: the RedshiftTarget and the
# COPY call are mocked out in the tests below, so only their presence
# (not their validity) matters.
AWS_ACCESS_KEY = 'key'
AWS_SECRET_KEY = 'secret'
BUCKET = 'bucket'
KEY = 'key'
class DummyS3CopyToTable(luigi.contrib.redshift.S3CopyToTable):
    """Minimal concrete ``S3CopyToTable`` used as a test fixture.

    Supplies the connection, table and credential attributes the abstract
    base class requires so the task can be instantiated and ``run`` with the
    Redshift target and COPY mocked out in the tests below.
    """
    # Class attributes taken from `DummyPostgresImporter` in
    # `../postgres_test.py`.
    host = 'dummy_host'
    database = 'dummy_database'
    user = 'dummy_user'
    password = 'dummy_password'
    table = luigi.Parameter(default='dummy_table')

    columns = (
        ('some_text', 'varchar(255)'),
        ('some_int', 'int'),
    )

    # Reference the fake credentials defined at module level instead of the
    # literal strings 'AWS_ACCESS_KEY'/'AWS_SECRET_KEY'; the constants above
    # were defined for exactly this purpose and were otherwise unused.
    aws_access_key_id = AWS_ACCESS_KEY
    aws_secret_access_key = AWS_SECRET_KEY

    copy_options = ''
    prune_table = ''
    prune_column = ''
    prune_date = ''

    def s3_load_path(self):
        """Return the (fake) S3 path the task would COPY from."""
        return 's3://%s/%s' % (BUCKET, KEY)
class DummyS3CopyToTempTable(DummyS3CopyToTable):
    # Extend/alter DummyS3CopyToTable for temp table copying
    # Loads into a TEMP staging table, prunes rows whose `dumb_date` is
    # older than 30 days, then moves staged rows into the real table via
    # the post-copy `queries` statement.
    table = luigi.Parameter(default='stage_dummy_table')
    table_type = 'TEMP'
    prune_date = 'current_date - 30'
    prune_column = 'dumb_date'
    prune_table = 'stage_dummy_table'
    queries = ["insert into dummy_table select * from stage_dummy_table;"]
class TestS3CopyToTable(unittest.TestCase):
    """Exercises S3CopyToTable.run() with RedshiftTarget and copy mocked.

    Each test digs the mocked cursor out of the
    RedshiftTarget().connect().cursor() chain and verifies the SQL /
    call arguments the task produced.  (The redundant bare ``return``
    statements at the end of the original test methods were removed.)
    """

    @mock.patch("luigi.contrib.redshift.S3CopyToTable.copy")
    @mock.patch("luigi.contrib.redshift.RedshiftTarget")
    def test_s3_copy_to_table(self, mock_redshift_target, mock_copy):
        task = DummyS3CopyToTable()
        task.run()
        # The mocked connection cursor passed to
        # S3CopyToTable.copy(self, cursor, f).
        mock_cursor = (mock_redshift_target.return_value
                       .connect
                       .return_value
                       .cursor
                       .return_value)
        # `mock_redshift_target` is the mocked `RedshiftTarget` object
        # returned by S3CopyToTable.output(self).
        mock_redshift_target.assert_called_with(database=task.database,
                                                host=task.host,
                                                update_id=task.task_id,
                                                user=task.user,
                                                table=task.table,
                                                password=task.password)
        # Check if the `S3CopyToTable.s3_load_path` class attribute was
        # successfully referenced in the `S3CopyToTable.run` method, which is
        # in-turn passed to `S3CopyToTable.copy` and other functions in `run`
        # (see issue #995).
        mock_copy.assert_called_with(mock_cursor, task.s3_load_path())
        # Check the SQL query in `S3CopyToTable.does_table_exist`.
        mock_cursor.execute.assert_called_with("select 1 as table_exists "
                                               "from pg_table_def "
                                               "where tablename = %s limit 1",
                                               (task.table,))

    @mock.patch("luigi.contrib.redshift.S3CopyToTable.does_table_exist",
                return_value=False)
    @mock.patch("luigi.contrib.redshift.RedshiftTarget")
    def test_s3_copy_to_missing_table(self,
                                      mock_redshift_target,
                                      mock_does_exist):
        """
        Test missing table creation
        """
        # Ensure `S3CopyToTable.create_table` does not throw an error.
        task = DummyS3CopyToTable()
        task.run()
        # Make sure the cursor was successfully used to create the table in
        # `create_table` as expected.
        mock_cursor = (mock_redshift_target.return_value
                       .connect
                       .return_value
                       .cursor
                       .return_value)
        assert mock_cursor.execute.call_args_list[0][0][0].startswith(
            "CREATE TABLE %s" % task.table)

    @mock.patch("luigi.contrib.redshift.S3CopyToTable.copy")
    @mock.patch("luigi.contrib.redshift.RedshiftTarget")
    def test_s3_copy_to_temp_table(self, mock_redshift_target, mock_copy):
        task = DummyS3CopyToTempTable()
        task.run()
        # The mocked connection cursor passed to
        # S3CopyToTable.copy(self, cursor, f).
        mock_cursor = (mock_redshift_target.return_value
                       .connect
                       .return_value
                       .cursor
                       .return_value)
        # `mock_redshift_target` is the mocked `RedshiftTarget` object
        # returned by S3CopyToTable.output(self).
        mock_redshift_target.assert_called_once_with(
            database=task.database,
            host=task.host,
            update_id=task.task_id,
            user=task.user,
            table=task.table,
            password=task.password,
        )
        # Check if the `S3CopyToTable.s3_load_path` class attribute was
        # successfully referenced in the `S3CopyToTable.run` method, which is
        # in-turn passed to `S3CopyToTable.copy` and other functions in `run`
        # (see issue #995).
        mock_copy.assert_called_once_with(mock_cursor, task.s3_load_path())
        # Check the SQL query in `S3CopyToTable.does_table_exist`. # temp table
        mock_cursor.execute.assert_any_call(
            "select 1 as table_exists "
            "from pg_table_def "
            "where tablename = %s limit 1",
            (task.table,),
        )
class TestS3CopyToSchemaTable(unittest.TestCase):
    """Checks the table-existence probe for a schema-qualified table name."""

    @mock.patch("luigi.contrib.redshift.S3CopyToTable.copy")
    @mock.patch("luigi.contrib.redshift.RedshiftTarget")
    def test_s3_copy_to_table(self, target_mock, copy_mock):
        task = DummyS3CopyToTable(table='dummy_schema.dummy_table')
        task.run()
        # Dig the mocked cursor out of RedshiftTarget().connect().cursor().
        connection = target_mock.return_value.connect.return_value
        cursor = connection.cursor.return_value
        # A dotted name must be probed via information_schema with separate
        # schema/table parameters.
        schema_and_table = tuple(task.table.split('.'))
        cursor.execute.assert_called_with(
            "select 1 as table_exists "
            "from information_schema.tables "
            "where table_schema = %s and "
            "table_name = %s limit 1",
            schema_and_table,
        )
class DummyRedshiftUnloadTask(luigi.contrib.redshift.RedshiftUnloadTask):
    """Minimal concrete RedshiftUnloadTask used by TestRedshiftUnloadTask."""
    # Class attributes taken from `DummyPostgresImporter` in
    # `../postgres_test.py`.
    host = 'dummy_host'
    database = 'dummy_database'
    user = 'dummy_user'
    password = 'dummy_password'
    table = luigi.Parameter(default='dummy_table')
    columns = (
        ('some_text', 'varchar(255)'),
        ('some_int', 'int'),
    )
    # Deliberately the literal strings (not the module-level constants):
    # the test asserts they appear verbatim in the UNLOAD credentials clause.
    aws_access_key_id = 'AWS_ACCESS_KEY'
    aws_secret_access_key = 'AWS_SECRET_KEY'
    s3_unload_path = 's3://%s/%s' % (BUCKET, KEY)
    unload_options = "DELIMITER ',' ADDQUOTES GZIP ALLOWOVERWRITE PARALLEL OFF"
    def query(self):
        # Constant query so the generated UNLOAD SQL is fully predictable.
        return "SELECT 'a' as col_a, current_date as col_b"
class TestRedshiftUnloadTask(unittest.TestCase):
    """Verifies the exact UNLOAD statement built by RedshiftUnloadTask."""
    @mock.patch("luigi.contrib.redshift.RedshiftTarget")
    def test_redshift_unload_command(self, mock_redshift_target):
        task = DummyRedshiftUnloadTask()
        task.run()
        # The mocked connection cursor passed to
        # RedshiftUnloadTask.
        mock_cursor = (mock_redshift_target.return_value
                       .connect
                       .return_value
                       .cursor
                       .return_value)
        # Check the Unload query.  The doubled backslash-escaped quotes are
        # required because the SELECT is embedded inside the UNLOAD's own
        # single-quoted string.
        mock_cursor.execute.assert_called_with(
            "UNLOAD ( 'SELECT \\'a\\' as col_a, current_date as col_b' ) TO 's3://bucket/key' "
            "credentials 'aws_access_key_id=AWS_ACCESS_KEY;aws_secret_access_key=AWS_SECRET_KEY' "
            "DELIMITER ',' ADDQUOTES GZIP ALLOWOVERWRITE PARALLEL OFF;"
        )
| mfcabrera/luigi | test/contrib/redshift_test.py | Python | apache-2.0 | 9,072 |
from django.apps import AppConfig
class SharedConfig(AppConfig):
    """Django AppConfig for the `shared` application."""
    name = 'shared'
| noahbkim/finances | shared/apps.py | Python | bsd-3-clause | 87 |
from django.db import models
class Host(models.Model):
    """A monitored network host; up/down transitions are recorded as Events."""
    # The address itself is the primary key; IPv4-mapped IPv6 addresses are
    # normalised back to IPv4 by unpack_ipv4.
    ip = models.GenericIPAddressField(unpack_ipv4=True, primary_key=True)
    name = models.CharField(max_length=30, editable=False, null=True)
    last_up = models.DateTimeField(null=True, editable=False)
    up_since = models.DateTimeField(null=True, editable=False)
    # None = state unknown (not yet probed), True/False = last observed state.
    up = models.NullBooleanField(default=None)
    monitor = models.BooleanField(default=True)
    network = models.ForeignKey('Network')
    def __unicode__(self):
        if self.name:
            name = self.name
        else:
            name = "unknown"
        return "%s (%s)" % (self.ip, name)
    def save(self, *args, **kwargs):
        # Load the previous row BEFORE saving so an up/down transition can be
        # detected; a brand-new host (no previous row) also records an Event.
        try:
            old = Host.objects.get(pk=self.pk)
        except Host.DoesNotExist:
            old = None
        super(Host, self).save(*args, **kwargs)
        if not old or old.up != self.up:
            Event.objects.create(host=self, up=self.up)
class Network(models.Model):
    """A named grouping of Hosts (referenced by Host.network)."""
    slug = models.SlugField()
    name = models.CharField(max_length=30, editable=True, unique=True)
    def __unicode__(self):
        return "%s" % (self.name)
class Event(models.Model):
    """An up/down state-change record for a Host, created by Host.save()."""
    host = models.ForeignKey('Host', editable=False)
    time = models.DateTimeField(auto_now_add=True, editable=False)
    # Mirrors Host.up at the time of the transition (None = unknown).
    up = models.NullBooleanField(default=None)
    def __unicode__(self):
        return u"%s %s -> %s" % (self.time, self.host.ip, self.up)
| kapsiry/vahti | hostmonitor/models.py | Python | mit | 1,411 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This is CoGroupByKey load test with Synthetic Source. Besides of the standard
input options there are additional options:
* project (optional) - the gcp project in case of saving
metrics in Big Query (in case of Dataflow Runner
it is required to specify project of runner),
* metrics_namespace (optional) - name of BigQuery table where metrics
will be stored,
in case of lack of any of both options metrics won't be saved
* input_options - options for Synthetic Sources
* co_input_options - options for Synthetic Sources.
Example test run on DirectRunner:
python setup.py nosetests \
--test-pipeline-options="
--project=big-query-project
--metrics_dataset=python_load_tests
--metrics_table=co_gbk
--input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0}'
--co_input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0}'" \
--tests apache_beam.testing.load_tests.co_group_by_key_test
To run test on other runner (ex. Dataflow):
python setup.py nosetests \
--test-pipeline-options="
--runner=TestDataflowRunner
--project=...
--staging_location=gs://...
--temp_location=gs://...
--sdk_location=./dist/apache-beam-x.x.x.dev0.tar.gz
--metrics_dataset=python_load_tests
--metrics_table=co_gbk
--input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0
}'
--co_input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0
}'" \
--tests apache_beam.testing.load_tests.co_group_by_key_test
"""
from __future__ import absolute_import
import json
import logging
import unittest
import apache_beam as beam
from apache_beam.testing import synthetic_pipeline
from apache_beam.testing.test_pipeline import TestPipeline
try:
from apache_beam.testing.load_tests.load_test_metrics_utils import MeasureTime
from apache_beam.testing.load_tests.load_test_metrics_utils import MetricsMonitor
from google.cloud import bigquery as bq
except ImportError:
bq = None
# Tags naming the two co-grouped PCollections.
INPUT_TAG = 'pc1'
CO_INPUT_TAG = 'pc2'
# Name of the runtime metric column written to BigQuery.
RUNTIME_LABEL = 'runtime'
@unittest.skipIf(bq is None, 'BigQuery for storing metrics not installed')
class CoGroupByKeyTest(unittest.TestCase):
  """CoGroupByKey load test over two SyntheticSource-backed inputs.

  Pipeline/metrics options come from the test pipeline options; see the
  module docstring for the full flag list.
  """

  def parseTestPipelineOptions(self, options):
    # Map the flat JSON test options onto the nested spec dict expected by
    # synthetic_pipeline.SyntheticSource.
    return {
        'numRecords': options.get('num_records'),
        'keySizeBytes': options.get('key_size'),
        'valueSizeBytes': options.get('value_size'),
        'bundleSizeDistribution': {
            'type': options.get(
                'bundle_size_distribution_type', 'const'
            ),
            'param': options.get('bundle_size_distribution_param', 0)
        },
        'forceNumInitialBundles': options.get(
            'force_initial_num_bundles', 0
        )
    }

  def setUp(self):
    self.pipeline = TestPipeline(is_integration_test=True)
    self.input_options = json.loads(self.pipeline.get_option('input_options'))
    self.co_input_options = json.loads(
        self.pipeline.get_option('co_input_options'))
    metrics_project_id = self.pipeline.get_option('project')
    self.metrics_namespace = self.pipeline.get_option('metrics_table')
    metrics_dataset = self.pipeline.get_option('metrics_dataset')
    self.metrics_monitor = None
    # NOTE(review): due to precedence, only `metrics_dataset` is compared
    # against None here; the first two operands are truthiness checks.  The
    # intent appears to be "all three options supplied" -- confirm.
    check = metrics_project_id and self.metrics_namespace and metrics_dataset\
            is not None
    if check:
      measured_values = [{'name': RUNTIME_LABEL,
                          'type': 'FLOAT',
                          'mode': 'REQUIRED'}]
      self.metrics_monitor = MetricsMonitor(
          project_name=metrics_project_id,
          table=self.metrics_namespace,
          dataset=metrics_dataset,
          schema_map=measured_values
      )
    else:
      logging.error('One or more of parameters for collecting metrics '
                    'are empty. Metrics will not be collected')

  class _Ungroup(beam.DoFn):
    # Flattens the CoGroupByKey output back into individual elements so the
    # joined collections are actually consumed.
    def process(self, element):
      values = element[1]
      inputs = values.get(INPUT_TAG)
      co_inputs = values.get(CO_INPUT_TAG)
      for i in inputs:
        yield i
      for i in co_inputs:
        yield i

  def testCoGroupByKey(self):
    # Build two keyed synthetic inputs, co-group them, then consume the
    # result; MeasureTime ParDos bracket the work for the runtime metric.
    with self.pipeline as p:
      pc1 = (p
             | 'Read ' + INPUT_TAG >> beam.io.Read(
                 synthetic_pipeline.SyntheticSource(
                     self.parseTestPipelineOptions(self.input_options)))
             | 'Make ' + INPUT_TAG + ' iterable' >> beam.Map(lambda x: (x, x))
             | 'Measure time: Start pc1' >> beam.ParDo(
                 MeasureTime(self.metrics_namespace))
            )
      pc2 = (p
             | 'Read ' + CO_INPUT_TAG >> beam.io.Read(
                 synthetic_pipeline.SyntheticSource(
                     self.parseTestPipelineOptions(self.co_input_options)))
             | 'Make ' + CO_INPUT_TAG + ' iterable' >> beam.Map(
                 lambda x: (x, x))
             | 'Measure time: Start pc2' >> beam.ParDo(
                 MeasureTime(self.metrics_namespace))
            )
      # pylint: disable=expression-not-assigned
      ({INPUT_TAG: pc1, CO_INPUT_TAG: pc2}
       | 'CoGroupByKey: ' >> beam.CoGroupByKey()
       | 'Consume Joined Collections' >> beam.ParDo(self._Ungroup())
       | 'Measure time: End' >> beam.ParDo(MeasureTime(self.metrics_namespace))
      )
      result = p.run()
      result.wait_until_finish()
      if self.metrics_monitor is not None:
        self.metrics_monitor.send_metrics(result)
if __name__ == '__main__':
  # Entry point when run directly (normally driven via nosetests).
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| mxm/incubator-beam | sdks/python/apache_beam/testing/load_tests/co_group_by_key_test.py | Python | apache-2.0 | 7,004 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import dhtmlparser
from harvester.autoparser import path_patterns
# Functions & objects =========================================================
def test_PathCall():
    """PathCall stores its three constructor arguments verbatim."""
    call_type, index, params = "type", "index", "params"
    call = path_patterns.PathCall(call_type, index, params)
    assert call.call_type == call_type
    assert call.index == index
    assert call.params == params
def test_Chained():
    """Chained normalises any iterable chain into a list."""
    c = path_patterns.Chained((1, 2, 3))
    # test conversion to list
    assert c.chain == [1, 2, 3]
def test_params_or_none():
    """_params_or_none maps empty dicts to None, keeps non-empty ones."""
    assert path_patterns._params_or_none({}) is None
    assert path_patterns._params_or_none({1:2}) == {1:2}
def test_neighbour_to_path_call():
    """An element neighbour is encoded as a left_neighbour_tag PathCall."""
    dom = dhtmlparser.parseString("<xex>\tHello </xex>")
    xex = dom.find("xex")[0]
    res = path_patterns._neighbour_to_path_call("left", xex, xex)
    assert isinstance(res, path_patterns.PathCall)
    assert res.call_type == "left_neighbour_tag"
    assert res.index == 0
    assert res.params.tag_name == "xex"
    # PEP 8: identity test for the None singleton (was `== None`).
    assert res.params.params is None
    assert res.params.fn_params == ["xex", None, "Hello"]
def test_neighbour_to_path_call_text():
    """A plain-text neighbour is matched by its stripped content only."""
    dom = dhtmlparser.parseString("<xex>\tHello </xex>")
    text = dom.find("xex")[0].childs[0]
    res = path_patterns._neighbour_to_path_call("left", text, text)
    assert isinstance(res, path_patterns.PathCall)
    assert res.call_type == "left_neighbour_tag"
    assert res.index == 0
    assert res.params.tag_name == "\tHello "
    # PEP 8: identity test for the None singleton (was `== None`).
    assert res.params.params is None
    assert res.params.fn_params == [None, None, "Hello"]
def test_neighbours_pattern():
    """Both element neighbours of <xex> yield left/right PathCalls."""
    dom = dhtmlparser.parseString(
        """
        asd
        <x>haxaxex</x>
        <xex>\tHello</xex>
        <xep></xep>
        asd
        """
    )
    dhtmlparser.makeDoubleLinked(dom)
    xex = dom.find("xex")[0]
    res = path_patterns.neighbours_pattern(xex)
    assert res
    assert len(res) == 2
    left, right = res
    assert left.call_type == "left_neighbour_tag"
    assert left.index == 0
    assert left.params.tag_name == "xex"
    # PEP 8: identity tests for the None singleton (were `== None`).
    assert left.params.params is None
    assert left.params.fn_params == ["x", None, "haxaxex"]
    assert right.call_type == "right_neighbour_tag"
    assert right.index == 0
    assert right.params.tag_name == "xex"
    assert right.params.params is None
    assert right.params.fn_params == ["xep", None, ""]
def test_neighbours_pattern_text_neigh():
    """A plain-text left neighbour is matched by its stripped content.

    Note: the original asserts re-checked ``res[0]`` (the *left* call)
    four times; they now check each neighbour's own PathCall, mirroring
    test_neighbours_pattern above.
    """
    dom = dhtmlparser.parseString(
        """
        asd
        <xex>\tHello</xex>
        <xep></xep>
        asd
        """
    )
    dhtmlparser.makeDoubleLinked(dom)
    xex = dom.find("xex")[0]
    res = path_patterns.neighbours_pattern(xex)
    assert res
    assert len(res) == 2
    left, right = res
    assert left.call_type == "left_neighbour_tag"
    assert left.index == 0
    assert left.params.tag_name == "xex"
    assert left.params.params is None
    assert left.params.fn_params == [None, None, "asd"]
    assert right.call_type == "right_neighbour_tag"
    assert right.index == 0
    assert right.params.tag_name == "xex"
    assert right.params.params is None
    assert right.params.fn_params == ["xep", None, ""]
def test_neighbours_pattern_left_corner():
    """An element at the left edge only yields a right-neighbour pattern."""
    dom = dhtmlparser.parseString(
        """
        <xex>\tHello</xex>
        <xep></xep>
        asd
        """
    )
    dhtmlparser.makeDoubleLinked(dom)
    xex = dom.find("xex")[0]
    res = path_patterns.neighbours_pattern(xex)
    assert res
    assert len(res) == 1
    assert res[0].call_type == "right_neighbour_tag"
    assert res[0].index == 0
    assert res[0].params.tag_name == "xex"
    # PEP 8: identity test for the None singleton (was `== None`).
    assert res[0].params.params is None
    assert res[0].params.fn_params == ["xep", None, ""]
def test_neighbours_pattern_right_corner():
    """An element at the right edge only yields a left-neighbour pattern."""
    dom = dhtmlparser.parseString(
        """
        asd
        <xex>\tHello</xex>
        """
    )
    dhtmlparser.makeDoubleLinked(dom)
    xex = dom.find("xex")[0]
    res = path_patterns.neighbours_pattern(xex)
    assert res
    assert len(res) == 1
    assert res[0].call_type == "left_neighbour_tag"
    assert res[0].index == 0
    assert res[0].params.tag_name == "xex"
    # PEP 8: identity test for the None singleton (was `== None`).
    assert res[0].params.params is None
    assert res[0].params.fn_params == [None, None, "asd"]
def test_neighbours_pattern_both_corners():
    """An element with no usable sibling on either side yields nothing."""
    dom = dhtmlparser.parseString(
        """
        <xex>\tHello</xex>
        """
    )
    dhtmlparser.makeDoubleLinked(dom)
    xex = dom.find("xex")[0]
    res = path_patterns.neighbours_pattern(xex)
    assert not res
def test_predecesors_pattern():
    """A nested element yields one `match` PathCall with the root→element
    tag path."""
    dom = dhtmlparser.parseString(
        """
        <root>
            <xex>
                <x>content</x>
            </xex>
        </root>
        """
    )
    dhtmlparser.makeDoubleLinked(dom)
    x = dom.find("x")[0]
    res = path_patterns.predecesors_pattern(x, dom)
    assert res
    assert len(res) == 1
    assert isinstance(res[0], path_patterns.PathCall)
    assert res[0].call_type == "match"
    assert res[0].index == 0
    # One (tag_name, params) pair per ancestor level, outermost first.
    assert res[0].params == [
        ["root", None],
        ["xex", None],
        ["x", None],
    ]
def test_predecesors_pattern_shallow_root():
    """A path that is too shallow (directly under root) yields no pattern."""
    dom = dhtmlparser.parseString(
        """
        <root>
            <x>content</x>
        </root>
        """
    )
    dhtmlparser.makeDoubleLinked(dom)
    x = dom.find("x")[0]
    res = path_patterns.predecesors_pattern(x, dom)
    assert not res
| edeposit/edeposit.amqp.harvester | src/edeposit/amqp/harvester/tests/unittests/autoparser/test_path_patterns.py | Python | mit | 5,559 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Authors module.
This module is work in progress. To enable it, remove the entry from
PACKAGE_EXCLUDED in invenio.base.config.
"""
from warnings import warn
# TODO Remove this warning when authors module is stable.
warn("This module is 'work in progress' and thus unstable.", ImportWarning)
| egabancho/invenio | invenio/modules/authors/__init__.py | Python | gpl-2.0 | 1,086 |
from django import forms
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.forms.utils import flatatt
from taggit.utils import edit_string_for_tags
from django.utils import six
class TagsInput(forms.TextInput):
    """TextInput widget rendered through bootstrap-tagsinput + typeahead."""
    class Media:
        # Static assets pulled in by Django's form-media machinery.
        css = {'all': ('css/bootstrap-tagsinput.css', 'css/typeahead.css')}
        js = ('js/typeahead.jquery.min.js', 'js/bootstrap-tagsinput.min.js')
    def render(self, name, value, attrs=None, renderer=None):
        # Non-string values (taggit tagged-item querysets) are flattened to
        # taggit's comma/space-separated edit string; strings pass through.
        if value is not None and not isinstance(value, six.string_types):
            value = edit_string_for_tags([o.tag for o in value.select_related("tag")])
        final_attrs = self.build_attrs(attrs, extra_attrs={"name": name})
        return mark_safe(render_to_string('taggit_bootstrap/widget.html', {
            'final_attrs': flatatt(final_attrs),
            'value': value if value else '',
            'id': final_attrs['id']
        }))
| mi6gan/django-taggit-bootstrap | taggit_bootstrap/widgets.py | Python | mit | 993 |
from flask import Flask, redirect, url_for, render_template
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager, UserMixin, login_user, logout_user,\
current_user
from oauth import OAuthSignIn
app = Flask(__name__)
app.config['SECRET_KEY'] = 'top secret!'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
app.config['OAUTH_CREDENTIALS'] = {
'facebook': {
'id': '1404534669856174',
'secret': 'fe7884b9bb5d8661ef80f1326a314a03'
},
'twitter': {
'id': '3RzWQclolxWZIMq5LJqzRZPTl',
'secret': 'm9TEd58DSEtRrZHpz2EjrV9AhsBRxKMo8m3kuIZj3zLwzwIimt'
}
}
db = SQLAlchemy(app)
lm = LoginManager(app)
lm.login_view = 'index'
class User(UserMixin, db.Model):
    """Application user keyed by the OAuth provider's identifier."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    # Unique id returned by the OAuth callback -- presumably provider-scoped;
    # verify against OAuthSignIn.callback().
    social_id = db.Column(db.String(64), nullable=False, unique=True)
    nickname = db.Column(db.String(64), nullable=False)
    email = db.Column(db.String(64), nullable=True)
@lm.user_loader
def load_user(id):
    """Flask-Login user loader: map the session-stored id to a User row."""
    return User.query.get(int(id))
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/logout')
def logout():
    """Log the current user out and return to the landing page."""
    logout_user()
    return redirect(url_for('index'))
@app.route('/authorize/<provider>')
def oauth_authorize(provider):
    """Start the OAuth flow with *provider* (no-op if already logged in)."""
    if not current_user.is_anonymous:
        return redirect(url_for('index'))
    oauth = OAuthSignIn.get_provider(provider)
    return oauth.authorize()
@app.route('/callback/<provider>')
def oauth_callback(provider):
    """OAuth redirect endpoint for *provider*.

    Currently returns oauth.callback() directly; the commented-out block
    below is the intended user lookup/creation + login flow, apparently
    kept while the callback contract is being worked out.
    """
    if not current_user.is_anonymous:
        return redirect(url_for('index'))
    oauth = OAuthSignIn.get_provider(provider)
    return oauth.callback()
    # social_id, username, email = oauth.callback()
    # if social_id is None:
    #     flash('Authentication failed.')
    #     return redirect(url_for('index'))
    # user = User.query.filter_by(social_id=social_id).first()
    # if not user:
    #     user = User(social_id=social_id, nickname=username, email=email)
    #     db.session.add(user)
    #     db.session.commit()
    # login_user(user, True)
    # return redirect(url_for('index'))
if __name__ == '__main__':
    # Create tables on first run; the debug server is development-only.
    db.create_all()
    app.run(debug=True)
| zurez/reviews | app.py | Python | mit | 2,218 |
# #!/usr/bin/python
# If you use the Apple-built Python (/usr/bin/python), then it should
# automatically find PyObjC and related Python modules in its path.
# The PyObjC module isn't automatically found in the PYTHONPATH for my Python
# installation from Python.Org, so I will explicity add it to the python path:
import sys
sys.path.append('/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/PyObjC')
import appdirs
import ConfigParser
import threading
import objc
from Foundation import *
from AppKit import *
from PyObjCTools import AppHelper
import time
import os
import subprocess
import tempfile
import traceback
import shutil
import webbrowser
import plistlib
import biplist
import unicodedata
import requests
from AlertDialog import alert
from UserExperimentsModel import UserExperimentsModel
from ExperimentDatasetsModel import ExperimentDatasetsModel
from DatasetFilesModel import DatasetFilesModel
# This Python script can't be run with "python desktopSync.py".
# You need to build an App bundle with py2app, using:
# python package_mac_version.py
# The executable will end up in:
# ./dist/MyTardis Desktop Sync.app/Contents/MacOS/MyTardis Desktop Sync
# and the Growl framework will end up in:
# ./dist/MyTardis Desktop Sync.app/Contents/Frameworks/Growl.framework
frameworkPath = '../Frameworks/Growl.framework'
myGrowlBundle = objc.loadBundle(
"GrowlApplicationBridge",
globals(),
bundle_path = objc.pathForFramework(frameworkPath)
)
windowController = None
def runApplescript(applescript):
    """Run *applescript* via /usr/bin/osascript and log its output.

    The script text is written to a temporary file because osascript is
    invoked with a file argument.  The file is now removed in a
    ``finally`` block so it is no longer leaked when osascript (or the
    logging around it) raises.  Finally the app is brought to the front.
    """
    NSLog(unicode(applescript))
    tempAppleScriptFile = tempfile.NamedTemporaryFile(delete=False)
    tempAppleScriptFileName = tempAppleScriptFile.name
    try:
        tempAppleScriptFile.write(applescript)
        tempAppleScriptFile.close()
        proc = subprocess.Popen(['/usr/bin/osascript', tempAppleScriptFileName],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                universal_newlines=True)
        stdout, stderr = proc.communicate()
        NSLog(unicode(stderr))
        NSLog(unicode(stdout))
    finally:
        os.unlink(tempAppleScriptFileName)
    # Bring app to top
    NSApp.activateIgnoringOtherApps_(True)
class MenuMakerDelegate(NSObject):
    """
    This is a delegate for Growl, a required element of using the Growl
    service.
    There isn't a requirement that delegates actually 'do' anything, but
    in this case, it creates a menulet with a Tardis icon.
    """
    statusbar = None
    state = 'idle'
    def applicationDidFinishLaunching_(self, notification):
        """
        Set up the menu and our menu items.
        """
        statusbar = NSStatusBar.systemStatusBar()
        # Create the statusbar item
        #self.statusitem = statusbar.statusItemWithLength_(NSVariableStatusItemLength)
        self.statusitem = statusbar.statusItemWithLength_(-1)
        self.statusitem.setHighlightMode_(1) # Let it highlight upon clicking
        self.statusitem.setToolTip_('MyTardis Desktop Sync') # Set a tooltip
        #self.statusitem.setTitle_('MyTardis') # Set an initial title
        self.icon = NSImage.alloc().initByReferencingFile_('tardis.png')
        self.icon.setScalesWhenResized_(True)
        self.icon.setSize_((20, 20))
        # We can change the icon later if the status changes to "syncing".
        self.statusitem.setImage_(self.icon)
        # Build a very simple menu
        self.menu = NSMenu.alloc().init()
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            'Open MyTardis in web browser',
            'onOpenMyTardisInWebBrowser:',
            ''
        )
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            'Open local MyTardis folder',
            'onOpenLocalMyTardisFolder:',
            ''
        )
        self.menu.addItem_(menuitem)
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            'Settings...',
            'onOpenSettings:',
            ''
        )
        self.menu.addItem_(menuitem)
        # Default event
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            'Quit',
            'terminate:',
            ''
        )
        self.menu.addItem_(menuitem)
        # Bind it to the status item
        self.statusitem.setMenu_(self.menu)
    def onOpenMyTardisInWebBrowser_(self,notification):
        """Menu action: open the MyTardis site in the default browser."""
        webbrowser.open("https://mytardis.massive.org.au/")
    def onOpenLocalMyTardisFolder_(self,notification):
        """Menu action: reveal the local sync folder in Finder, or warn
        if it does not exist yet."""
        NSLog(u"onOpenLocalMyTardisFolder")
        self.localFolder = str(windowController.localFolderField.stringValue())
        if os.path.exists(self.localFolder):
            NSLog(u"Path exists.")
            os.system('open "' + self.localFolder + '"')
        else:
            NSLog(u"Path doesn't exist.")
            alert("MyTardis Desktop Sync", "Path " + self.localFolder + " doesn't exist!", ["OK"])
    def onOpenSettings_(self,notification):
        """Menu action: bring the app forward and show the Settings panel."""
        # Bring app to top
        NSApp.activateIgnoringOtherApps_(True)
        windowController.usernameField.becomeFirstResponder()
        sender = None
        windowController.settingsPanel.makeKeyAndOrderFront_(sender)
class rcGrowl(NSObject):
    """
    rcGrowl registers this app with Growl to send out notifications
    on behalf of the user and do 'something' with the results when a
    notification has been clicked.
    For additional information on what the what is going on
    please refer to the growl documentation:
    http://growl.info/documentation/developer/implementing-growl.php
    """
    def rcSetDelegate(self):
        # Register this object as Growl's delegate so the callbacks below
        # are invoked.
        GrowlApplicationBridge.setGrowlDelegate_(self)
    def registrationDictionaryForGrowl(self):
        """
        Registration payload required by Growl; see
        http://growl.info/documentation/developer/implementing-growl.php#registration
        """
        return {
            u'ApplicationName' : 'rcGrowlMacTidy',
            u'AllNotifications' : ['growlNotification1'],
            u'DefaultNotifications' : ['growlNotification1'],
            u'NotificationIcon' : None,
        }
    # don't know if it is working or not
    def applicationNameForGrowl(self):
        """
        Identifies the application.
        """
        return 'rcGrowlMacTidy'
    def applicationIconDataForGrowl(self):
        """
        Custom icon to display in the notification.
        This doesn't seem to work, but we can just create
        a Tardis.icns bundle using Img2icns and add it
        to the Info.plist in the create_mac_bundle.py
        script used with py2app.
        """
        icon = NSImage.alloc().init()
        icon = icon.initWithContentsOfFile_(u'tardis.png')
        return icon
    def growlNotificationWasClicked_(self, ctx):
        """
        callback for onClick event
        """
        #NSLog(u"we got a click! " + str(time.time()) + " >>> " + str(ctx) + " <<<\n")
    def growlNotificationTimedOut_(self, ctx):
        """
        callback for timing out
        """
        #NSLog(u"We timed out" + str(ctx) + "\n")
    def growlIsReady(self):
        """
        Informs the delegate that GrowlHelperApp was launched
        successfully. Presumably if it's already running it
        won't need to run it again?
        """
        #NSLog(u"growl IS READY")
class MyTardisGrowlTest(NSWindowController):
settingsPanel = objc.IBOutlet()
serverAddressField = objc.IBOutlet()
localFolderField = objc.IBOutlet()
usernameField = objc.IBOutlet()
passwordField = objc.IBOutlet()
experimentsPanel = objc.IBOutlet()
    def awakeFromNib(self):
        """Initialise default field values, notification observers and the
        status-bar icon variants once the nib has loaded."""
        self.serverAddressField.setStringValue_("https://mytardis.massive.org.au/")
        localFolder = os.path.join(os.path.expanduser('~'),"MyTardis")
        self.localFolderField.setStringValue_(localFolder)
        # Restore the previously-saved username, if any.
        if globalConfig.has_section("Global Preferences"):
            if globalConfig.has_option("Global Preferences", "username"):
                username = globalConfig.get("Global Preferences", "username")
                self.usernameField.setStringValue_(username)
        # Each observer below handles one step of the sync workflow; steps
        # chain by posting the next notification name.
        self.notificationCenter = NSNotificationCenter.defaultCenter()
        self.notificationCenter.addObserver_selector_name_object_(self, "createLocalFolderAndAddToSidebar:", 'createLocalFolderAndAddToSidebar', None)
        self.notificationCenter.addObserver_selector_name_object_(self, "getRemoteExperimentsForUser:", 'getRemoteExperimentsForUser', None)
        self.notificationCenter.addObserver_selector_name_object_(self, "scanLocalExperimentFolders:", 'scanLocalExperimentFolders', None)
        self.notificationCenter.addObserver_selector_name_object_(self, "askUserWhichExperimentsToSync:", 'askUserWhichExperimentsToSync', None)
        self.notificationCenter.addObserver_selector_name_object_(self, "deleteUnusedLocalExperimentFolders:", 'deleteUnusedLocalExperimentFolders', None)
        self.notificationCenter.addObserver_selector_name_object_(self, "getDatasetsForExperiments:", 'getDatasetsForExperiments', None)
        self.notificationCenter.addObserver_selector_name_object_(self, "getDatasetFilesForDatasets:", 'getDatasetFilesForDatasets', None)
        self.notificationCenter.addObserver_selector_name_object_(self, "createLocalExperimentFolders:", 'createLocalExperimentFolders', None)
        self.notificationCenter.addObserver_selector_name_object_(self, "createLocalDatasetFolders:", 'createLocalDatasetFolders', None)
        self.notificationCenter.addObserver_selector_name_object_(self, "downloadDatasetFiles:", 'downloadDatasetFiles', None)
        self.menuMakerDelegate = NSApplication.sharedApplication().delegate()
        # Pre-build the three status-bar icon variants (synced / error /
        # syncing) so the menulet icon can be swapped cheaply later.
        self.tardisTickStatusBarIcon = NSImage.alloc().initByReferencingFile_('tardis_tick.png')
        self.tardisTickStatusBarIcon.setScalesWhenResized_(True)
        self.tardisTickStatusBarIcon.setSize_((20, 20))
        self.tardisCrossStatusBarIcon = NSImage.alloc().initByReferencingFile_('tardis_cross.png')
        self.tardisCrossStatusBarIcon.setScalesWhenResized_(True)
        self.tardisCrossStatusBarIcon.setSize_((20, 20))
        self.tardisRefreshStatusBarIcon = NSImage.alloc().initByReferencingFile_('tardis_refresh.png')
        self.tardisRefreshStatusBarIcon.setScalesWhenResized_(True)
        self.tardisRefreshStatusBarIcon.setSize_((20, 20))
@objc.IBAction
def onConnect_(self, sender):
# I've noticed a bug where after pressing Connect on the Settings panel,
# and allowing the MyTardis connections to be made, if you open the
# Settings panel again, this method gets called immediately when the
# panel is displayed, even though the Connect button hasn't been pressed
# yet. This could be because this method is doing too much in the GUI
# thread - it really should be spawning a separate thread. I have
# lessened the problem by ensuring that the password field is cleared
# immediately after it is used - this will prevent the time-consuming
# part of this method from being accidentally executed a second time.
if (hasattr(self,'onConnectRunning') and self.onConnectRunning):
return
self.onConnectingRunning = True
self.mytardisUrl = self.serverAddressField.stringValue()
self.username = self.usernameField.stringValue()
self.password = self.passwordField.stringValue()
if self.password.strip()=="":
alert("MyTardis Desktop Sync", "Please enter your password.", ["OK"])
windowController.passwordField.becomeFirstResponder()
return
globalConfig.set("Global Preferences", "username", self.username)
with open(globalPreferencesFilePath, 'wb') as globalPreferencesFileObject:
globalConfig.write(globalPreferencesFileObject)
self.passwordField.setStringValue_("")
sender = None
self.settingsPanel.performClose_(sender)
self.onConnectingRunning = False
self.notificationCenter.postNotificationName_object_userInfo_('createLocalFolderAndAddToSidebar', None, None)
    @objc.signature('v@:@')
    def createLocalFolderAndAddToSidebar_(self,sender):
        """
        Notification handler: ensure the local sync folder exists and is
        listed in the Finder sidebar, then post
        'getRemoteExperimentsForUser' to continue the pipeline.

        On a folder-creation failure, shows an alert, re-opens the
        settings panel and aborts the pipeline.
        """
        # Re-entrancy guard (same pattern as onConnect_):
        if (hasattr(self,'createLocalFolderAndAddToSidebarRunning') and self.createLocalFolderAndAddToSidebarRunning):
            return
        self.createLocalFolderAndAddToSidebarRunning = True
        self.localFolder = self.localFolderField.stringValue()
        if not os.path.exists(self.localFolder):
            try:
                os.mkdir(self.localFolder)
            except:
                errorMessage = "Failed to create directory: " + self.localFolder
                alert("MyTardis Desktop Sync", errorMessage, ["OK"])
                windowController.settingsPanel.makeKeyAndOrderFront_(sender)
                self.createLocalFolderAndAddToSidebarRunning = False
                return
        # Check whether our local MyTardis folder has already been added to the
        # Finder's sidebar:
        self.localFolderIsInFinderSidebar = False
        sidebarPlistFilePath = os.path.join(os.path.expanduser('~'),"Library/Preferences/com.apple.sidebarlists.plist")
        if os.path.exists(sidebarPlistFilePath):
            sidebarPlist = None
            try:
                # The sidebar plist is normally binary; try biplist first.
                sidebarPlist = biplist.readPlist(sidebarPlistFilePath)
            except InvalidPlistException, e:
                NSLog(u"Invalid plist.")
            except NotBinaryPlistException, e:
                # Fall back to the stdlib parser for XML plists.
                sidebarPlist = plistlib.readPlist(sidebarPlistFilePath)
            # Normalize to plain ASCII so we can search for the path inside
            # the binary 'Alias' blobs stored in the plist.
            localFolderSearchString = unicodedata.normalize('NFKD', self.localFolder.strip("/")).encode('ascii','ignore')
            favouriteVolumesList = sidebarPlist['favorites']['VolumesList']
            for volume in favouriteVolumesList:
                if 'Alias' in volume.keys() and localFolderSearchString in volume['Alias']:
                    NSLog(u"Found path in alias: Name = " + volume['Name'])
                    self.localFolderIsInFinderSidebar = True
        if not self.localFolderIsInFinderSidebar:
            # The following applescript adds the local folder to the Finder's sidebar.
            applescript = """
tell application "Finder"
    activate
    set myFolder to POSIX file """
            applescript = applescript + '"' + self.localFolder + '"'
            applescript = applescript + """
    select myFolder
    tell application "System Events"
        # Add folder to Finder sidebar
        keystroke "t" using command down
    end tell
    # close front Finder window
end tell
"""
            runApplescript(applescript)
        self.createLocalFolderAndAddToSidebarRunning = False
        self.notificationCenter.postNotificationName_object_userInfo_('getRemoteExperimentsForUser', None, None)
    @objc.signature('v@:@')
    def getRemoteExperimentsForUser_(self,sender):
        """
        Notification handler: fetch the user's experiment list from the
        MyTardis server, then post 'scanLocalExperimentFolders'.

        On failure, logs the error, re-opens the settings panel, sets the
        "cross" status-bar icon and shows an alert.
        """
        # Re-entrancy guard (same pattern as onConnect_):
        if (hasattr(self,'getRemoteExperimentsForUserRunning') and self.getRemoteExperimentsForUserRunning):
            return
        self.getRemoteExperimentsForUserRunning = True
        self.userExperimentsModel = UserExperimentsModel(self.mytardisUrl,self.username,self.password)
        self.userExperimentsModel.parseMyTardisExperimentList()
        # The model records exceptions rather than raising them across the
        # Objective-C bridge; check for one here.
        errorMessage = self.userExperimentsModel.getUnhandledExceptionMessage()
        if errorMessage is not None:
            NSLog(unicode(errorMessage))
            windowController.settingsPanel.makeKeyAndOrderFront_(sender)
            self.menuMakerDelegate.statusitem.setImage_(self.tardisCrossStatusBarIcon)
            alert("MyTardis Desktop Sync", errorMessage, ["OK"])
            self.getRemoteExperimentsForUserRunning = False
            return
        self.getRemoteExperimentsForUserRunning = False
        # Next stage of the sync pipeline:
        self.notificationCenter.postNotificationName_object_userInfo_('scanLocalExperimentFolders', None, None)
    @objc.signature('v@:@')
    def scanLocalExperimentFolders_(self,sender):
        """
        Notification handler: record which remote experiments already have
        a folder under the local sync folder, then post
        'askUserWhichExperimentsToSync'.

        Populates ``self.localExperimentFolders`` mapping each experiment
        ID to its directory name, or None when no local folder exists yet.
        """
        # Re-entrancy guard (same pattern as onConnect_):
        if (hasattr(self,'scanLocalExperimentFoldersRunning') and self.scanLocalExperimentFoldersRunning):
            return
        self.scanLocalExperimentFoldersRunning = True
        self.localExperimentFolders = {}
        for experimentID,experiment in self.userExperimentsModel.getExperiments().iteritems():
            if os.path.exists(os.path.join(self.localFolder, experiment.getDirectoryName())):
                self.localExperimentFolders[experimentID] = experiment.getDirectoryName()
            else:
                self.localExperimentFolders[experimentID] = None
        self.scanLocalExperimentFoldersRunning = False
        # Next stage of the sync pipeline:
        self.notificationCenter.postNotificationName_object_userInfo_('askUserWhichExperimentsToSync', None, None)
    @objc.signature('v@:@')
    def askUserWhichExperimentsToSync_(self,sender):
        """
        Notification handler: show the experiments panel so the user can
        opt in/out of syncing each experiment, then post
        'deleteUnusedLocalExperimentFolders'.

        The experiment list is passed to the panel via NSUserDefaults
        under the key 'experiments' and read back (possibly modified by
        the panel's bindings) after the modal session ends.
        """
        # Re-entrancy guard (same pattern as onConnect_):
        if (hasattr(self,'askUserWhichExperimentsToSyncRunning') and self.askUserWhichExperimentsToSyncRunning):
            return
        self.askUserWhichExperimentsToSyncRunning = True
        experiments = []
        for experimentID in self.userExperimentsModel.getExperimentIDs():
            experiments.append(dict(
                # Pre-tick experiments that already have a local folder:
                optin = (self.localExperimentFolders[experimentID]!=None),
                experimentID = experimentID,
                experimentName = self.userExperimentsModel.getExperimentTitles()[experimentID]
                ))
        NSUserDefaults.standardUserDefaults().setObject_forKey_(experiments,'experiments')
        if NSApplication.sharedApplication().runModalForWindow_(self.experimentsPanel) == NSOKButton:
            NSLog(u"experimentsPanel OK")
        else:
            # User cancelled - abort the pipeline here.
            NSLog(u"experimentsPanel Cancel")
            self.askUserWhichExperimentsToSyncRunning = False
            return
        # Copy the user's opt-in choices back into the experiments model.
        experimentsPlistArray = NSUserDefaults.standardUserDefaults().objectForKey_('experiments')
        for experimentDict in experimentsPlistArray:
            experiment = self.userExperimentsModel.getExperiment(experimentDict['experimentID'])
            experiment.setOptIn(experimentDict['optin'])
        self.askUserWhichExperimentsToSyncRunning = False
        # Next stage of the sync pipeline:
        self.notificationCenter.postNotificationName_object_userInfo_('deleteUnusedLocalExperimentFolders', None, None)
    @objc.signature('v@:@')
    def deleteUnusedLocalExperimentFolders_(self,sender):
        """
        Notification handler: offer to delete local experiment folders for
        experiments the user has opted out of, then post
        'getDatasetsForExperiments'.

        Folder names are expected to look like "<experimentID> - <title>";
        subdirectories not matching that pattern are left alone.  Deletion
        only happens after the user confirms via an OK/Cancel alert.
        """
        # Re-entrancy guard (same pattern as onConnect_):
        if (hasattr(self,'deleteUnusedLocalExperimentFoldersRunning') and self.deleteUnusedLocalExperimentFoldersRunning):
            return
        self.deleteUnusedLocalExperimentFoldersRunning = True
        localFolderSubdirectoriesToDelete = []
        # os.walk(...).next()[1] gives the immediate subdirectory names only.
        localFolderSubdirectories = os.walk(self.localFolder).next()[1]
        for localFolderSubdirectory in localFolderSubdirectories:
            if not " - " in localFolderSubdirectory:
                continue
            # Leading "<experimentID>" portion of "<experimentID> - <title>":
            localFolderExperimentID = os.path.split(localFolderSubdirectory)[1].split(" - ")[0]
            if self.userExperimentsModel.getExperiment(localFolderExperimentID)!=None and \
                self.userExperimentsModel.getExperiment(localFolderExperimentID).getOptIn()==False:
                localFolderSubdirectoriesToDelete.append(self.userExperimentsModel.getExperiment(localFolderExperimentID).getDirectoryName())
        if len(localFolderSubdirectoriesToDelete)>0:
            message = """
WARNING: The following experiment folders:
"""
            for localFolderSubdirectoryToDelete in localFolderSubdirectoriesToDelete:
                message = message + localFolderSubdirectoryToDelete + "\n"
            message = message + """
will be deleted from:
"""
            message = message + self.localFolder
            okButtonID = NSAlertFirstButtonReturn
            cancelButtonID = NSAlertSecondButtonReturn
            # NOTE(review): assumes alert() returns NSAlertFirstButtonReturn
            # for the first ("OK") button - confirm against alert()'s impl.
            buttonPressed = alert("MyTardis Desktop Sync", message, ["OK","Cancel"])
            if buttonPressed==okButtonID:
                NSLog(u"OK button pressed.")
                for localFolderSubdirectoryToDelete in localFolderSubdirectoriesToDelete:
                    shutil.rmtree(os.path.join(self.localFolder,localFolderSubdirectoryToDelete))
            else:
                # Cancel aborts the whole pipeline, back to the settings panel.
                NSLog(u"Cancel button pressed.")
                windowController.settingsPanel.makeKeyAndOrderFront_(sender)
                self.deleteUnusedLocalExperimentFoldersRunning = False
                return
        self.deleteUnusedLocalExperimentFoldersRunning = False
        # Next stage of the sync pipeline:
        self.notificationCenter.postNotificationName_object_userInfo_('getDatasetsForExperiments', None, None)
    @objc.signature('v@:@')
    def getDatasetsForExperiments_(self,sender):
        """
        Notification handler: fetch the dataset list for every opted-in
        experiment on a background thread, then post
        'getDatasetFilesForDatasets'.

        Switches the status-bar icon to the "refresh" image while running.
        On failure, switches to the "cross" icon and shows a modal alert
        (both marshalled onto the main thread).
        """
        # Re-entrancy guard (same pattern as onConnect_):
        if (hasattr(self,'getDatasetsForExperimentsRunning') and self.getDatasetsForExperimentsRunning):
            return
        self.getDatasetsForExperimentsRunning = True
        self.menuMakerDelegate.statusitem.setImage_(self.tardisRefreshStatusBarIcon)
        def getDatasetsForExperimentsThread():
            # Every thread that touches Cocoa needs its own autorelease pool.
            pool = NSAutoreleasePool.alloc().init()
            self.countTotalRemoteDatasetsInOptedInExperiments = 0
            self.experimentDatasetsModel = {}
            for experimentID,experiment in self.userExperimentsModel.getExperiments().iteritems():
                if not experiment.getOptIn():
                    continue
                self.experimentDatasetsModel[experimentID] = ExperimentDatasetsModel(self.mytardisUrl,self.username,self.password,experimentID)
                self.experimentDatasetsModel[experimentID].parseMyTardisDatasetList()
                errorMessage = self.experimentDatasetsModel[experimentID].getUnhandledExceptionMessage()
                if errorMessage is not None:
                    NSLog(u"My Exception: " + unicode(errorMessage))
                    # UI updates must happen on the main thread:
                    self.menuMakerDelegate.statusitem.performSelectorOnMainThread_withObject_waitUntilDone_("setImage:",self.tardisCrossStatusBarIcon,0)
                    #alert("MyTardis Desktop Sync", errorMessage, ["OK"])
                    alertPanel = NSAlert.alloc().init()
                    alertPanel.setMessageText_("MyTardis Desktop Sync")
                    alertPanel.setInformativeText_(errorMessage)
                    alertPanel.setAlertStyle_(NSInformationalAlertStyle)
                    alertPanel.addButtonWithTitle_("OK")
                    NSApp.activateIgnoringOtherApps_(True)
                    # waitUntilDone=1 blocks this thread until the alert is
                    # dismissed on the main thread.
                    alertPanel.performSelectorOnMainThread_withObject_waitUntilDone_("runModal",None,1)
                    windowController.settingsPanel.makeKeyAndOrderFront_(sender)
                    self.getDatasetsForExperimentsRunning = False
                    return
                self.countTotalRemoteDatasetsInOptedInExperiments = self.countTotalRemoteDatasetsInOptedInExperiments + self.experimentDatasetsModel[experimentID].getNumberOfDatasets()
            self.getDatasetsForExperimentsRunning = False
            # Next stage of the sync pipeline:
            self.notificationCenter.postNotificationName_object_userInfo_('getDatasetFilesForDatasets', None, None)
        thread = threading.Thread(target = getDatasetsForExperimentsThread)
        thread.start()
    @objc.signature('v@:@')
    def getDatasetFilesForDatasets_(self,sender):
        """
        Notification handler: fetch the datafile list for every dataset of
        every opted-in experiment on a background thread, then post
        'createLocalExperimentFolders'.

        On failure, sets the "cross" status-bar icon and shows a modal
        alert (both marshalled onto the main thread).
        """
        # Re-entrancy guard (same pattern as onConnect_):
        if (hasattr(self,'getDatasetFilesForDatasetsRunning') and self.getDatasetFilesForDatasetsRunning):
            return
        self.getDatasetFilesForDatasetsRunning = True
        def getDatasetFilesForDatasetsThread():
            # Every thread that touches Cocoa needs its own autorelease pool.
            pool = NSAutoreleasePool.alloc().init()
            self.countTotalRemoteDatasetFilesInOptedInExperiments = 0
            self.experimentDatasetFilesModel = {}
            for experimentID,experiment in self.userExperimentsModel.getExperiments().iteritems():
                if not experiment.getOptIn():
                    continue
                self.experimentDatasetFilesModel[experimentID] = dict()
                for datasetID,dataset in self.experimentDatasetsModel[experimentID].getDatasets().iteritems():
                    self.experimentDatasetFilesModel[experimentID][datasetID] = DatasetFilesModel(self.mytardisUrl,self.username,self.password,datasetID)
                    self.experimentDatasetFilesModel[experimentID][datasetID].parseMyTardisDatafileList()
                    errorMessage = self.experimentDatasetFilesModel[experimentID][datasetID].getUnhandledExceptionMessage()
                    if errorMessage is not None:
                        NSLog(unicode(errorMessage))
                        # UI updates must happen on the main thread:
                        self.menuMakerDelegate.statusitem.performSelectorOnMainThread_withObject_waitUntilDone_("setImage:",self.tardisCrossStatusBarIcon,0)
                        #alert("MyTardis Desktop Sync", errorMessage, ["OK"])
                        alertPanel = NSAlert.alloc().init()
                        alertPanel.setMessageText_("MyTardis Desktop Sync")
                        alertPanel.setInformativeText_(errorMessage)
                        alertPanel.setAlertStyle_(NSInformationalAlertStyle)
                        alertPanel.addButtonWithTitle_("OK")
                        NSApp.activateIgnoringOtherApps_(True)
                        # waitUntilDone=1 blocks this thread until the alert
                        # is dismissed on the main thread.
                        alertPanel.performSelectorOnMainThread_withObject_waitUntilDone_("runModal",None,1)
                        windowController.settingsPanel.makeKeyAndOrderFront_(sender)
                        self.getDatasetFilesForDatasetsRunning = False
                        return
                    self.countTotalRemoteDatasetFilesInOptedInExperiments = self.countTotalRemoteDatasetFilesInOptedInExperiments + self.experimentDatasetFilesModel[experimentID][datasetID].getNumberOfDatafiles()
            self.getDatasetFilesForDatasetsRunning = False
            # Next stage of the sync pipeline:
            self.notificationCenter.postNotificationName_object_userInfo_('createLocalExperimentFolders', None, None)
        thread = threading.Thread(target = getDatasetFilesForDatasetsThread)
        thread.start()
    @objc.signature('v@:@')
    def createLocalExperimentFolders_(self,sender):
        """
        Notification handler: create a local folder for every opted-in
        experiment (on a background thread), then post
        'createLocalDatasetFolders'.

        Folder creation failures are logged but do not abort the pipeline.
        """
        # Re-entrancy guard (same pattern as onConnect_):
        if (hasattr(self,'createLocalExperimentFoldersRunning') and self.createLocalExperimentFoldersRunning):
            return
        self.createLocalExperimentFoldersRunning = True
        def createLocalExperimentFoldersThread():
            # Every thread that touches Cocoa needs its own autorelease pool.
            pool = NSAutoreleasePool.alloc().init()
            for experimentID,experiment in self.userExperimentsModel.getExperiments().iteritems():
                if not experiment.getOptIn():
                    continue
                if not os.path.exists(os.path.join(self.localFolder, experiment.getDirectoryName())):
                    try:
                        os.mkdir(os.path.join(self.localFolder, experiment.getDirectoryName()))
                    except:
                        NSLog(u"Failed to create directory: " + os.path.join(self.localFolder, experiment.getDirectoryName()))
                    try:
                        # Force finder window to refresh:
                        #os.system('open "' + self.localFolder + '"')
                        # The following will give an error if there is no Finder window open,
                        # but if the user has the local folder open in Finder, then this
                        # will force the window to refresh after each experiment is added.
                        applescript = 'tell application "Finder" to delete (make new folder at (front window))'
                        runApplescript(applescript)
                    except:
                        NSLog(unicode(traceback.format_exc()))
            self.createLocalExperimentFoldersRunning = False
            # Next stage of the sync pipeline:
            self.notificationCenter.postNotificationName_object_userInfo_('createLocalDatasetFolders', None, None)
        thread = threading.Thread(target = createLocalExperimentFoldersThread)
        thread.start()
    @objc.signature('v@:@')
    def createLocalDatasetFolders_(self,sender):
        """
        Notification handler: create a local folder for every dataset of
        every opted-in experiment (on a background thread), then post
        'downloadDatasetFiles'.

        Posts a Growl notification summarising how many dataset folders
        were created (only when at least one was).
        """
        # Re-entrancy guard (same pattern as onConnect_):
        if (hasattr(self,'createLocalDatasetFoldersRunning') and self.createLocalDatasetFoldersRunning):
            return
        self.createLocalDatasetFoldersRunning = True
        def createLocalDatasetFoldersThread():
            # Every thread that touches Cocoa needs its own autorelease pool.
            pool = NSAutoreleasePool.alloc().init()
            countLocalDatasetFoldersCreated = 0
            for experimentID,experiment in self.userExperimentsModel.getExperiments().iteritems():
                if not experiment.getOptIn():
                    continue
                for datasetID,dataset in self.experimentDatasetsModel[experimentID].getDatasets().iteritems():
                    if not os.path.exists(os.path.join(self.localFolder, experiment.getDirectoryName(), dataset.getDirectoryName())):
                        try:
                            os.mkdir(os.path.join(self.localFolder, experiment.getDirectoryName(), dataset.getDirectoryName()))
                            countLocalDatasetFoldersCreated = countLocalDatasetFoldersCreated + 1
                        except:
                            NSLog(u"Failed to create directory: " + os.path.join(self.localFolder, experiment.getDirectoryName(), dataset.getDirectoryName()))
            if countLocalDatasetFoldersCreated > 0:
                isSticky = False
                GrowlApplicationBridge.notifyWithTitle_description_notificationName_iconData_priority_isSticky_clickContext_(
                    "MyTardis Desktop Sync Client",
                    str(countLocalDatasetFoldersCreated) + " data set folder(s) were created in " + self.localFolder,
                    "growlNotification1", None, 0, isSticky, "This will be the argument to the context callback")
            self.createLocalDatasetFoldersRunning = False
            # Next stage of the sync pipeline:
            self.notificationCenter.postNotificationName_object_userInfo_('downloadDatasetFiles', None, None)
        thread = threading.Thread(target = createLocalDatasetFoldersThread)
        thread.start()
    @objc.signature('v@:@')
    def downloadDatasetFiles_(self,sender):
        """
        Notification handler (final pipeline stage): log in to MyTardis
        with a requests session and download every missing datafile for
        every dataset of every opted-in experiment, on a background
        thread.

        A file is only downloaded if it does not already exist locally
        (no checksum verification - see inline comment).  The status-bar
        icon is set to "tick" on success or "cross" if any download
        failed, and a Growl notification summarises the download count.
        """
        # Re-entrancy guard (same pattern as onConnect_):
        if (hasattr(self,'downloadDatasetFilesRunning') and self.downloadDatasetFilesRunning):
            return
        self.downloadDatasetFilesRunning = True
        def downloadDatasetFilesThread():
            # Every thread that touches Cocoa needs its own autorelease pool.
            pool = NSAutoreleasePool.alloc().init()
            try:
                session = requests.session()
                # verify=False skips SSL certificate verification.
                r = session.post(self.mytardisUrl + '/login/', {'username':self.username,'password':self.password}, verify=False)
                if r.status_code==200:
                    NSLog(u"MyTardis authentication succeeded for username: " + self.username)
                    pass
                else:
                    # raise Exception("MyTardis authentication failed for username: " + self.username)
                    # Combining Python try/except with Objective-C try/catch is complicated.
                    self.unhandledExceptionMessage = "MyTardis authentication failed for username: " + self.username
                    NSLog(unicode(self.unhandledExceptionMessage))
                    self.downloadDatasetFilesRunning = False
                    return
            except:
                NSLog(unicode(traceback.format_exc()))
            countDatafilesDownloaded = 0
            exceptionOccurred = False
            for experimentID,experiment in self.userExperimentsModel.getExperiments().iteritems():
                if not experiment.getOptIn():
                    continue
                NSLog(u"experiment.getDirectoryName() = " + experiment.getDirectoryName())
                for datasetID,dataset in self.experimentDatasetsModel[experimentID].getDatasets().iteritems():
                    NSLog(u"dataset.getDirectoryName() = " + dataset.getDirectoryName())
                    for datafileID,datafile in self.experimentDatasetFilesModel[experimentID][datasetID].getDatafiles().iteritems():
                        if datafile.getFileName() is None:
                            continue
                        NSLog(u"datafile.getFileName() = " + datafile.getFileName())
                        pathToDownloadTo = os.path.join(self.localFolder, experiment.getDirectoryName(), dataset.getDirectoryName(), datafile.getFileName())
                        NSLog(u"pathToDownloadTo = " + pathToDownloadTo)
                        # We really should do an MD5 hash check here, not just check whether the file exists:
                        if not os.path.exists(pathToDownloadTo):
                            NSLog(u"Data file doesn't exist locally. Downloading from server.")
                            datafile.downloadTo(pathToDownloadTo,session)
                            errorMessage = datafile.getUnhandledExceptionMessage()
                            if errorMessage is not None:
                                NSLog(unicode(errorMessage))
                                exceptionOccurred = True
                                # For now, we will allow the sync-ing to continue even if it fails for some data file(s).
                            else:
                                countDatafilesDownloaded = countDatafilesDownloaded + 1
                        else:
                            NSLog(u"Data file already exists locally. Not downloading from server.")
            try:
                session.close()
            except:
                NSLog(unicode(traceback.format_exc()))
            # UI updates must happen on the main thread:
            if exceptionOccurred:
                self.menuMakerDelegate.statusitem.performSelectorOnMainThread_withObject_waitUntilDone_("setImage:",self.tardisCrossStatusBarIcon,0)
            else:
                self.menuMakerDelegate.statusitem.performSelectorOnMainThread_withObject_waitUntilDone_("setImage:",self.tardisTickStatusBarIcon,0)
            if countDatafilesDownloaded > 0:
                isSticky = False
                GrowlApplicationBridge.notifyWithTitle_description_notificationName_iconData_priority_isSticky_clickContext_(
                    "MyTardis Desktop Sync Client",
                    str(countDatafilesDownloaded) + " data files were downloaded from MyTardis.",
                    "growlNotification1", None, 0, isSticky, "This will be the argument to the context callback")
            self.downloadDatasetFilesRunning = False
        thread = threading.Thread(target = downloadDatasetFilesThread)
        thread.start()
    @objc.IBAction
    def onExperimentsPanelOKButtonClicked_(self, sender):
        """End the experiments panel's modal session with OK and close it."""
        NSApplication.sharedApplication().stopModalWithCode_(NSOKButton)
        self.experimentsPanel.performClose_(sender)
    @objc.IBAction
    def onExperimentsPanelCancelButtonClicked_(self, sender):
        """End the experiments panel's modal session with Cancel and close it."""
        NSApplication.sharedApplication().stopModalWithCode_(NSCancelButton)
        self.experimentsPanel.performClose_(sender)
    @objc.IBAction
    def onBrowse_(self, sender):
        """
        Handler for the Browse button: let the user pick the local sync
        folder and store the selection in the local-folder text field.
        """
        NSLog(u"Browse button pressed!")
        panel = NSOpenPanel.openPanel()
        panel.setCanCreateDirectories_(True)
        panel.setCanChooseDirectories_(True)
        panel.setCanChooseFiles_(False)
        if panel.runModal() == NSOKButton:
            # NOTE(review): panel.filename() is deprecated in newer macOS
            # SDKs in favour of panel.URL() - acceptable for the OS versions
            # this app targets, but worth confirming.
            NSLog(unicode(panel.filename()))
            self.localFolderField.setStringValue_(panel.filename())
    @objc.IBAction
    def onCancelFromExperimentsPanel_(self, sender):
        """Close the experiments panel without applying any changes."""
        NSLog(u"onCancelFromExperimentsPanel_")
        self.experimentsPanel.performClose_(sender)
if __name__ == "__main__":
    # Resolve the per-user application-support directory, creating it on
    # first run.
    appDirs = appdirs.AppDirs("MyTardis Desktop Sync", "Monash University")
    appUserDataDir = appDirs.user_data_dir
    # Add trailing slash:
    appUserDataDir = os.path.join(appUserDataDir,"")
    if not os.path.exists(appUserDataDir):
        os.makedirs(appUserDataDir)
    # Store the config object and preferences path on the module so they
    # are reachable as globals from the window controller's methods.
    sys.modules[__name__].globalConfig = ConfigParser.RawConfigParser(allow_no_value=True)
    globalConfig = sys.modules[__name__].globalConfig
    sys.modules[__name__].globalPreferencesFilePath = os.path.join(appUserDataDir,"Global Preferences.cfg")
    globalPreferencesFilePath = sys.modules[__name__].globalPreferencesFilePath
    if os.path.exists(globalPreferencesFilePath):
        globalConfig.read(globalPreferencesFilePath)
    if not globalConfig.has_section("Global Preferences"):
        globalConfig.add_section("Global Preferences")
    # set up system statusbar GUI
    app = NSApplication.sharedApplication()
    delegate = MenuMakerDelegate.alloc().init()
    app.setDelegate_(delegate)
    # set up growl delegate
    rcGrowlDelegate=rcGrowl.new()
    rcGrowlDelegate.rcSetDelegate()
    # Load the main window controller from its nib and show the settings
    # panel with the username field focused.
    windowController = MyTardisGrowlTest.alloc().initWithWindowNibName_("MyTardisGrowlTest")
    sender = None
    windowController.showWindow_(sender)
    #windowController.settingsPanel.orderOut_(sender)
    windowController.usernameField.becomeFirstResponder()
    windowController.settingsPanel.makeKeyAndOrderFront_(sender)
    # Bring app to top
    NSApp.activateIgnoringOtherApps_(True)
    AppHelper.runEventLoop()
| monash-merc/mytardis-desktop-sync-macosx | desktopSync.py | Python | gpl-3.0 | 36,559 |
'''Contains the Core classes of the PEATSA command line tool'''
import ProteinDesignTool, Data, Exceptions, PEATSAParallel
| dmnfarrell/peat | PEATSA/Core/__init__.py | Python | mit | 123 |
"""
:copyright: Copyright (C) 2013-2017 Gianluca Costa.
:license: LGPLv3, see LICENSE for details.
""" | giancosta86/Iris | info/gianlucacosta/iris/io/__init__.py | Python | lgpl-3.0 | 102 |
#!/usr/bin/python
import sys
import string
def howto_install_setuptools():
    """Print a short error telling the user that setuptools is required."""
    # A parenthesized print with a single argument works identically on
    # Python 2 and Python 3; the old ``print """..."""`` statement form is
    # Python-2-only and breaks byte-compilation under Python 3.
    print("Error: You need setuptools Python package!")
try:
    from setuptools import setup
    # setuptools is available: eggs can be built and zip_safe applies.
    params = {
        'zip_safe': True
    }
except ImportError:
    # setuptools is missing.  If an egg-related command was requested we
    # cannot proceed; otherwise fall back to plain distutils.
    for arg in sys.argv:
        # ``'egg' in arg`` replaces the Python-2-only
        # ``string.find(arg, 'egg') != -1`` idiom (same semantics).
        if 'egg' in arg:
            howto_install_setuptools()
            sys.exit(1)
    from distutils.core import setup
    params = {}
params.update({
    'name': 'crawler',
    'version': '0.0.1b',
    'description': 'Elements for the HC generic crawler',
    'author': 'Worldine',
    'license': 'Proprietary',
    'packages': [
        'reduce'
    ],
    'scripts': [
        'listener.py'
    ]
})
# ``apply(setup, (), params)`` is a Python-2-only builtin; keyword expansion
# is equivalent and works on both Python 2 and Python 3.
setup(**params)
| redcurrant/redcurrant | crawler/listener/setup.py | Python | lgpl-3.0 | 632 |
"""engine.SCons.Platform.darwin
Platform-specific initialization for Mac OS X systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/darwin.py 5023 2010/06/14 22:05:46 scons"
import posix
def generate(env):
    """Set up construction variables for Mac OS X (Darwin) systems."""
    # Darwin is POSIX-flavoured: start from the generic POSIX settings,
    # then layer the Darwin-specific differences on top.
    posix.generate(env)
    # Shared libraries use the .dylib suffix on Darwin.
    env['SHLIBSUFFIX'] = '.dylib'
    # Fink installs its tools under /sw/bin; append it to the search path.
    env['ENV']['PATH'] += ':/sw/bin'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| faarwa/EngSocP5 | zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Platform/darwin.py | Python | gpl-3.0 | 1,758 |
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields
class ResCountry(models.Model):
    """Extend ``res.country`` with Brazilian government country codes."""
    _inherit = 'res.country'

    # Banco Central do Brasil (Central Bank) country code.
    bc_code = fields.Char('Codigo BC', size=5)
    # IBGE (Brazilian Institute of Geography and Statistics) code.
    ibge_code = fields.Char('Codigo IBGE', size=5)
    # Siscomex (foreign-trade system) country code.
    siscomex_code = fields.Char('Codigo Siscomex', size=4)
class ResCountryState(models.Model):
    """Extend ``res.country.state`` with the Brazilian IBGE state code."""
    _inherit = 'res.country.state'

    # IBGE (Brazilian Institute of Geography and Statistics) state code.
    ibge_code = fields.Char('Codigo IBGE', size=2)
| rvalyi/l10n-brazil | l10n_br_base/models/res_country.py | Python | agpl-3.0 | 517 |
# -*- coding: utf-8 -*-
import pytest
from enma.extensions import mail
# NOTE(review): the skipif condition is the *string* 'False', which pytest
# evaluates to False, so this test always runs.  Given the reason text,
# 'True' (skip by default) may have been intended - confirm with the author.
@pytest.mark.skipif('False', reason='requires a working email configuration')
def test_reset_passwd_email_sent(user, testapp):
    """
    Test if a send password email is to be send
    """
    # record_messages() captures outgoing mail instead of delivering it.
    with mail.record_messages() as outbox:
        from enma.user.mail import send_reset_password_link
        assert len(outbox) == 0
        send_reset_password_link(user)
        # Exactly one reset-link message should have been queued.
        assert len(outbox) == 1
| pixmeter/enma | tests/test_enma/user/test_mail.py | Python | bsd-3-clause | 473 |
# stdlib imports
import urllib
import urlparse
def replace_query_param(url, key, val):
    """
    Given a URL and a key/val pair, set or replace an item in the query
    parameters of the URL, and return the new URL.

    If ``val`` is None the parameter is removed instead (removing a
    parameter that is not present is silently ignored).  All other query
    parameters are preserved.
    """
    # Import locally with a Python 3 / Python 2 fallback: the module-level
    # ``urllib``/``urlparse`` imports are the Python-2-only spellings, which
    # would make this helper unusable on Python 3.
    try:
        from urllib.parse import urlsplit, urlunsplit, parse_qs, urlencode
    except ImportError:  # Python 2
        from urlparse import urlsplit, urlunsplit, parse_qs
        from urllib import urlencode
    scheme, netloc, path, query, fragment = urlsplit(url)
    query_dict = parse_qs(query)
    if val is None:
        query_dict.pop(key, None)
    else:
        query_dict[key] = val
    # parse_qs maps each key to a *list* of values; keep only the first
    # value per key, matching the original behaviour.
    flat = [(k, v[0] if isinstance(v, list) else v)
            for k, v in query_dict.items()]
    query = urlencode(flat)
    return urlunsplit((scheme, netloc, path, query, fragment))
| paddycarey/beardo-control | app/app/utils/misc.py | Python | mit | 714 |
#!/usr/bin/env python
from distutils.core import setup
# Execute bittle/version.py to pick up ``__version__`` without importing the
# package.  ``execfile`` was removed in Python 3; open + exec is equivalent
# and works on both Python 2 and Python 3.
with open('bittle/version.py') as _version_file:
    exec(compile(_version_file.read(), 'bittle/version.py', 'exec'))

kwargs = {
    "name": "bittle",
    "version": str(__version__),
    "packages": ["bittle", "bittle.tests"],
    "description": "Simple library to help with bit manipulations.",
    # PyPi, despite not parsing markdown, will prefer the README.md to the
    # standard README. Explicitly read it here.
    "long_description": open("README").read(),
    "author": "Gary M. Josack",
    "maintainer": "Gary M. Josack",
    "author_email": "[email protected]",
    "maintainer_email": "[email protected]",
    "license": "MIT",
    "url": "https://github.com/gmjosack/bittle",
    "download_url": "https://github.com/gmjosack/bittle/archive/master.tar.gz",
    "classifiers": [
        "Programming Language :: Python",
        "Topic :: Software Development",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ]
}

setup(**kwargs)
| gmjosack/bittle | setup.py | Python | mit | 995 |
# -*- coding: utf-8 -*-
from client import Client
from exc import RajaOngkirExc
| kalarau/rajaongkir-python | RajaOngkir/__init__.py | Python | bsd-2-clause | 80 |
from copy import copy
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.db.models import CharField, FloatField, IntegerField
from django.db.models.signals import post_delete, post_save
from mezzanine.utils.deprecation import get_related_model
class BaseGenericRelation(GenericRelation):
    """
    Extends ``GenericRelation`` to:

    - Add a consistent default value for ``object_id_field`` and
      check for a ``default_related_model`` attribute which can be
      defined on subclasses as a default for the ``to`` argument.

    - Add one or more custom fields to the model that the relation
      field is applied to, and then call a ``related_items_changed``
      method each time related items are saved or deleted, so that a
      calculated value can be stored against the custom fields since
      aggregates aren't available for GenericRelation instances.
    """

    # Mapping of field names to model fields that will be added.
    # Keys may contain "%s", which is interpolated with the relation
    # field's name in contribute_to_class().
    fields = {}

    def __init__(self, *args, **kwargs):
        """
        Set up some defaults and check for a ``default_related_model``
        attribute for the ``to`` argument.
        """
        kwargs.setdefault("object_id_field", "object_pk")
        to = getattr(self, "default_related_model", None)
        # Avoid having both a positional arg and a keyword arg for
        # the parameter ``to``
        if to and not args:
            kwargs.setdefault("to", to)
        try:
            # Check if ``related_model`` has been modified by a subclass
            self.related_model
        except (AppRegistryNotReady, AttributeError):
            # if not, all is good
            super().__init__(*args, **kwargs)
        else:
            # otherwise, warn the user to stick to the new (as of 4.0)
            # ``default_related_model`` attribute
            raise ImproperlyConfigured(
                "BaseGenericRelation changed the "
                "way it handled a default ``related_model`` in mezzanine "
                "4.0. Please override ``default_related_model`` instead "
                "and do not tamper with django's ``related_model`` "
                "property anymore."
            )

    def contribute_to_class(self, cls, name):
        """
        Add each of the names and fields in the ``fields`` attribute
        to the model the relationship field is applied to, and set up
        the related item save and delete signals for calling
        ``related_items_changed``.
        """
        # Only one field of this type may exist per model, since the
        # signal handlers look the field up on the model by class.
        for field in cls._meta.many_to_many:
            if isinstance(field, self.__class__):
                e = "Multiple {} fields are not supported ({}.{}, {}.{})".format(
                    self.__class__.__name__,
                    cls.__name__,
                    cls.__name__,
                    name,
                    field.name,
                )
                raise ImproperlyConfigured(e)
        self.related_field_name = name
        super().contribute_to_class(cls, name)
        # Not applicable to abstract classes, and in fact will break.
        if not cls._meta.abstract:
            for (name_string, field) in self.fields.items():
                # Interpolate the relation field's name into "%s" keys.
                if "%s" in name_string:
                    name_string = name_string % name
                extant_fields = cls._meta._forward_fields_map
                # Don't clobber a field the model already defines.
                if name_string in extant_fields:
                    continue
                if field.verbose_name is None:
                    field.verbose_name = self.verbose_name
                cls.add_to_class(name_string, copy(field))
            # Add a getter function to the model we can use to retrieve
            # the field/manager by name.
            getter_name = "get_%s_name" % self.__class__.__name__.lower()
            cls.add_to_class(getter_name, lambda self: name)
            # Fire related_items_changed whenever a related object is
            # saved or deleted.
            sender = get_related_model(self)
            post_save.connect(self._related_items_changed, sender=sender)
            post_delete.connect(self._related_items_changed, sender=sender)

    def _related_items_changed(self, **kwargs):
        """
        Ensure that the given related item is actually for the model
        this field applies to, and pass the instance to the real
        ``related_items_changed`` handler.
        """
        for_model = kwargs["instance"].content_type.model_class()
        if for_model and issubclass(for_model, self.model):
            instance_id = kwargs["instance"].object_pk
            try:
                instance = for_model.objects.get(id=instance_id)
            except self.model.DoesNotExist:
                # Instance itself was deleted - signals are irrelevant.
                return
            if hasattr(instance, "get_content_model"):
                instance = instance.get_content_model()
            related_manager = getattr(instance, self.related_field_name)
            self.related_items_changed(instance, related_manager)

    def related_items_changed(self, instance, related_manager):
        """
        Can be implemented by subclasses - called whenever the
        state of related items change, eg they're saved or deleted.
        The instance for this field and the related manager for the
        field are passed as arguments.
        """
        pass

    def value_from_object(self, obj):
        """
        Returns the value of this field in the given model instance.

        See: https://code.djangoproject.com/ticket/22552
        """
        return getattr(obj, self.attname).all()
class CommentsField(BaseGenericRelation):
    """
    Generic relation to ``ThreadedComment`` that denormalises the number
    of comments into a ``<field_name>_count`` column on the host model,
    kept up to date whenever a comment is saved or deleted.
    """

    default_related_model = "generic.ThreadedComment"
    fields = {"%s_count": IntegerField(editable=False, default=0)}

    def related_items_changed(self, instance, related_manager):
        """
        Recompute and store the comment count for ``instance``.

        Managers may expose a custom ``count_queryset`` implementing
        bespoke counting logic; fall back to a plain ``count()``
        otherwise.
        """
        try:
            total = related_manager.count_queryset()
        except AttributeError:
            total = related_manager.count()
        counter_attr = next(iter(self.fields)) % self.related_field_name
        setattr(instance, counter_attr, total)
        instance.save()
class KeywordsField(BaseGenericRelation):
    """
    Stores the keywords as a single string into the
    ``KEYWORDS_FIELD_NAME_string`` field for convenient access when
    searching.
    """

    default_related_model = "generic.AssignedKeyword"
    fields = {"%s_string": CharField(editable=False, blank=True, max_length=500)}

    def __init__(self, *args, **kwargs):
        """
        Mark the field as editable so that it can be specified in
        admin class fieldsets and pass validation, and also so that
        it shows up in the admin form.
        """
        super().__init__(*args, **kwargs)
        self.editable = True

    def formfield(self, **kwargs):
        """
        Provide the custom form widget for the admin, since there
        isn't a form field mapped to ``GenericRelation`` model fields.
        """
        from mezzanine.generic.forms import KeywordsWidget

        kwargs["widget"] = KeywordsWidget
        return super().formfield(**kwargs)

    def save_form_data(self, instance, data):
        """
        The ``KeywordsWidget`` field will return data as a string of
        comma separated IDs for the ``Keyword`` model - convert these
        into actual ``AssignedKeyword`` instances. Also delete
        ``Keyword`` instances if their last related ``AssignedKeyword``
        instance is being removed.
        """
        from mezzanine.generic.models import Keyword

        related_manager = getattr(instance, self.name)
        # Get a list of Keyword IDs being removed.
        old_ids = [str(a.keyword_id) for a in related_manager.all()]
        new_ids = data.split(",")
        removed_ids = set(old_ids) - set(new_ids)
        # Remove current AssignedKeyword instances.
        related_manager.all().delete()
        # Convert the data into AssignedKeyword instances.
        if data:
            data = [related_manager.create(keyword_id=i) for i in new_ids]
        # Remove keywords that are no longer assigned to anything.
        Keyword.objects.delete_unused(removed_ids)
        getattr(instance, self.name).set(data)

    def contribute_to_class(self, cls, name):
        """
        Swap out any reference to ``KeywordsField`` with the
        ``KEYWORDS_FIELD_string`` field in ``search_fields``.
        """
        super().contribute_to_class(cls, name)
        string_field_name = list(self.fields.keys())[0] % self.related_field_name
        if hasattr(cls, "search_fields") and name in cls.search_fields:
            try:
                weight = cls.search_fields[name]
            except TypeError:
                # search_fields is a sequence.
                index = cls.search_fields.index(name)
                search_fields_type = type(cls.search_fields)
                cls.search_fields = list(cls.search_fields)
                cls.search_fields[index] = string_field_name
                cls.search_fields = search_fields_type(cls.search_fields)
            else:
                # search_fields is a mapping of name -> weight; preserve
                # the weight against the replacement string field.
                del cls.search_fields[name]
                cls.search_fields[string_field_name] = weight

    def related_items_changed(self, instance, related_manager):
        """
        Stores the keywords as a single string for searching.
        """
        assigned = related_manager.select_related("keyword")
        keywords = " ".join(str(a.keyword) for a in assigned)
        string_field_name = list(self.fields.keys())[0] % self.related_field_name
        # Only save when the denormalised string actually changed, to
        # avoid a redundant write (and re-triggering save signals).
        if getattr(instance, string_field_name) != keywords:
            setattr(instance, string_field_name, keywords)
            instance.save()
class RatingField(BaseGenericRelation):
    """
    Maintains denormalised rating statistics on the host model: the
    number of ratings, their sum and their average are written to the
    ``RATING_FIELD_NAME_count``, ``RATING_FIELD_NAME_sum`` and
    ``RATING_FIELD_NAME_average`` fields whenever a rating is saved
    or deleted.
    """
    default_related_model = "generic.Rating"
    fields = {
        "%s_count": IntegerField(default=0, editable=False),
        "%s_sum": IntegerField(default=0, editable=False),
        "%s_average": FloatField(default=0, editable=False),
    }
    def related_items_changed(self, instance, related_manager):
        """
        Recalculate the rating count, sum and average and persist
        them onto ``instance``.
        """
        values = [rating.value for rating in related_manager.all()]
        total = sum(values)
        num_ratings = len(values)
        mean = total / num_ratings if num_ratings else 0
        stats = (("count", num_ratings), ("sum", total), ("average", mean))
        for suffix, stat in stats:
            setattr(instance, "%s_%s" % (self.related_field_name, suffix), stat)
        instance.save()
| stephenmcd/mezzanine | mezzanine/generic/fields.py | Python | bsd-2-clause | 10,992 |
"""
Copyright (C) 2014, 申瑞珉 (Ruimin Shen)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import platform
import sqlite3
import csv
import configparser
import pyoptimization.utility
def get_data_settings(path, delimiter='\t'):
    """Load per-extension data settings from a delimited settings file.

    Each row contains (extension, column, converter); the converter cell
    is eval'd into a callable, so the settings file must be trusted.
    Returns a dict mapping extension -> (column, converter).

    BUG FIX: the file handle was opened but never closed; a ``with``
    block now guarantees it is released.
    """
    dataSettings = {}
    with open(path, 'r') as f:
        reader = csv.reader(f, delimiter=delimiter)
        for ext, column, converter in reader:
            # NOTE(review): eval of file content - only feed trusted files.
            converter = eval(converter)
            dataSettings[ext] = (column, converter)
    return dataSettings
def get_properties_settings(path, delimiter='\t'):
    """Load property extraction settings from a delimited settings file.

    Each row contains (column, converter); the converter cell is eval'd
    into a callable, so the settings file must be trusted.
    Returns a list of (column, converter) pairs in file order.

    BUG FIX: the file handle was opened but never closed; a ``with``
    block now guarantees it is released.
    """
    propertiesSettings = []
    with open(path, 'r') as f:
        reader = csv.reader(f, delimiter=delimiter)
        for column, converter in reader:
            # NOTE(review): eval of file content - only feed trusted files.
            converter = eval(converter)
            propertiesSettings.append((column, converter))
    return propertiesSettings
def full_split_path(path):
    """Break *path* into all of its components, root first.

    Unlike a single ``os.path.split`` call, this keeps splitting until
    the whole path is decomposed, e.g. ``'a/b/c' -> ['a', 'b', 'c']``.
    """
    components = []
    head = path
    while head:
        head, tail = os.path.split(head)
        if tail:
            components.insert(0, tail)
        else:
            # Reached the root (e.g. '/' or a drive); keep it and stop.
            components.insert(0, head)
            break
    return components
def get_properties(components, propertiesSettings):
    """Apply every configured converter to the path components.

    Returns a list of (column, converter(components)) pairs, preserving
    the order of *propertiesSettings*.
    """
    return [(column, converter(components))
            for column, converter in propertiesSettings]
def group_names(filenames):
    """Group file names by their stem.

    Returns a dict mapping basename (without extension) to the list of
    extensions seen for it, in input order.
    """
    groups = {}
    for filename in filenames:
        stem, ext = os.path.splitext(filename)
        groups.setdefault(stem, []).append(ext)
    return groups
def group_properties(name, group, dataSettings, parent):
    """Read and convert the data files belonging to one name group.

    For every extension in *group* that has an entry in *dataSettings*,
    read ``parent/name+ext`` (skipping directories) and append
    (column, converter(file_contents)) to the result.
    """
    properties = []
    for ext in group:
        if ext not in dataSettings:
            continue
        pathData = os.path.join(parent, name + ext)
        if os.path.isdir(pathData):
            continue
        column, converter = dataSettings[ext]
        with open(pathData, 'r') as f:
            raw = f.read()
        properties.append((column, converter(raw)))
    return properties
def main():
    """Walk the results tree and import file metadata into a SQLite table."""
    config = configparser.ConfigParser()
    pyoptimization.utility.read_config(config, __file__)
    # Database
    dataSettings = get_data_settings(os.path.expandvars(config.get('importer', 'data_settings')))
    propertiesSettings = get_properties_settings(os.path.expandvars(config.get('importer', 'properties_settings')))
    root = os.path.expandvars(config.get('importer', 'root.' + platform.system()))
    conn = sqlite3.connect(os.path.expandvars(config.get('database', 'file.' + platform.system())))
    table = config.get('database', 'table')
    cursor = conn.cursor()
    for parent, _, filenames in os.walk(root):
        groups = group_names(filenames)
        for name in groups:
            properties = group_properties(name, groups[name], dataSettings, parent)
            if properties:
                path = os.path.join(parent, name)
                pathRelative = os.path.relpath(path, root)
                print(path + '.*')
                # Derive additional columns from the relative path components.
                components = full_split_path(pathRelative)
                _properties = get_properties(components, propertiesSettings)
                print(_properties)
                properties += _properties
                # Insert into database
                # NOTE(review): the column list is rendered with str(tuple);
                # a single column would produce "('col',)" with a trailing
                # comma, and identifiers are not escaped - this relies on
                # trusted settings files. Verify before reuse.
                columns, rowData = zip(*properties)
                sql = 'INSERT INTO %s %s VALUES (%s)' % (table, str(columns), ', '.join(['?'] * len(rowData)))
                cursor.execute(sql, rowData)
    conn.commit()
    print('Finished normally')
if __name__ == '__main__':
    main()
| O-T-L/PyOptimization | importer.py | Python | lgpl-3.0 | 4,181 |
# Create a variable named db that is an SqliteDatabase with a filename of challenges.db.
from peewee import *
db = SqliteDatabase('challenges.db')
class Challenge(Model):
    """A coding challenge row stored in the challenges.db SQLite database."""
    # Human-readable challenge title.
    name = CharField(max_length=100)
    # Programming language the challenge targets.
    language = CharField(max_length=100)
    # Number of steps in the challenge; a single step by default.
    steps = IntegerField(default=1)
# Now add db as the database attribute in the Meta class for Challenge.
    class Meta:
        database = db
# Finally, create a function named initialize. Your initialize() function should connect to the database
# and then create the Challenge table. Make sure it creates the table safely.
def initialize():
    """Connect to the database and create the Challenge table if needed."""
    db.connect()
    # safe=True skips creation when the table already exists.
    db.create_tables([Challenge], safe=True)
#Author Emily Keiser
def addition(x, y):
    """Return the sum of *x* and *y* after coercing both to int."""
    return sum(map(int, (x, y)))
def subtraction(x, y):
    """Return *x* minus *y*, treating both inputs as integers."""
    minuend = int(x)
    subtrahend = int(y)
    return minuend - subtrahend
def multiplication(x, y):
    """Return the product of *x* and *y*, treating both as integers."""
    factors = (int(x), int(y))
    return factors[0] * factors[1]
def module(x, y):
    """Return *x* modulo *y* (the remainder), treating both as integers."""
    dividend, divisor = int(x), int(y)
    return dividend % divisor
# Read the two operands as strings from the user (Python 2 raw_input).
a=raw_input("Enter variable a: ")
b=raw_input("Enter variable b: ")
# Apply each operation in turn and print the result
# (Python 2 print statements; the functions coerce the strings to int).
print addition (a,b)
print subtraction (a,b)
print multiplication (a,b)
print module (a,b)
#addition
#c=int(a)+int(b)
#Output?
#print(c)
#subtraction
#d=int(a)-int(b)
#print(d)
#multiplication
#e=int(a)*int(b)
#print(e)
#module
#f=36%5
#print(f)
| davidvillaciscalderon/PythonLab | Session 3/basic_operations_with_function.py | Python | mit | 537 |
from bulbs.model import Node, Relationship
from bulbs.property import String, Integer, DateTime, Bool, Float, Null
from bulbs.utils import current_datetime
def uuidify():
    """Return a freshly generated random UUID (version 4) as a string."""
    import uuid
    return str(uuid.uuid4())
class NetworkDeviceType(Node):
    """Graph node describing a category of network device."""
    element_type = "NetworkDeviceType"
    # Application-level id; generated because graph element ids are backend-assigned.
    uuid = String(default=uuidify)
    name = String()
class DeviceType(Node):
    """Graph node describing a category of (non-network) device."""
    element_type = "DeviceType"
    uuid = String(default=uuidify)
    name = String()
class LinkType(Node):
    """Graph node describing a category of link."""
    element_type = "LinkType"
    uuid = String(default=uuidify)
    name = String()
class Facility(Node):
    """Graph node for a physical facility (e.g. a site or data centre)."""
    element_type = "Facility"
    uuid = String(default=uuidify)
    name = String()
class Rack(Node):
    """Graph node for an equipment rack, with geographic coordinates."""
    element_type = "Rack"
    uuid = String(default=uuidify)
    name = String()
    # Longitude/latitude of the rack's location.
    lon=Float()
    lat = Float()
class NetworkDevice(Node):
    """Graph node for a network device reachable at a mandatory IP address."""
    element_type = "NetworkDevice"
    uuid = String(default=uuidify)
    name = String()
    # IP address is required for every network device.
    ip = String(nullable=False)
    # True when the device is virtual rather than physical hardware.
    virtual = Bool(default=False)
class SSHAgent(Node):
    """Graph node holding SSH credentials used to run commands remotely.

    NOTE(review): ``run_command`` reads ``self.device.ip`` but ``device``
    is not declared on this model - presumably resolved through a graph
    relationship; verify before use.
    """
    element_type = "SSHAgent"
    uuid = String(default=uuidify)
    username = String(nullable=False)
    key = String(nullable=False)
    host_key = String()
    def run_command(self, command):
        """Execute *command* on the related device over SSH.

        Returns the (stdin, stdout, stderr) file objects from paramiko.

        BUG FIX: the method was declared without ``self``, so any call on
        an instance bound the instance to ``command`` and every
        ``self.*`` access raised NameError.
        """
        import paramiko
        client = paramiko.SSHClient()
        # With no recorded host key, fall back to auto-accepting the
        # remote host's key.
        if self.host_key is None:
            client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
        client.load_system_host_keys()
        client.connect(self.device.ip)
        stdin, stdout, stderr = client.exec_command(command)
        return stdin, stdout, stderr
class Service(Node):
    """Graph node for a service, tagged with a free-form service type."""
    element_type = "Service"
    uuid = String(default=uuidify)
    name = String()
    service_type = String()
class Storage(Node):
    """Graph node for a storage resource."""
    element_type = "Storage"
    uuid = String(default=uuidify)
    name = String()
class Interface(Node):
    """Graph node for a device interface/port."""
    element_type = "Interface"
    uuid = String(default=uuidify)
    name = String()
class Link(Node):
    """Graph node for a link; bidirectional unless flagged otherwise."""
    element_type = "Link"
    uuid = String(default=uuidify)
    bidirectional = Bool(default=True)
class Connects(Relationship):
    """Graph edge with label "connects"."""
    label="connects"
class Contains(Relationship):
    """Graph edge with label "contains"."""
    label="contains"
| sirmmo/mmonet | mmonet/inventory/models.py | Python | mit | 1,958 |
from __future__ import absolute_import
import os
import ast
import logging
import json
import requests
import base64
import pytz
from datetime import datetime, timedelta
from urlparse import urlparse
from OpenSSL import crypto
from .exceptions import InternalError
log = logging.getLogger(__name__)
# Map of Alexa application id -> skill name, built from ALEXA_APP_ID_* env vars.
ALEXA_APP_IDS = dict([(str(os.environ[envvar]), envvar.replace("ALEXA_APP_ID_", "")) for envvar in os.environ.keys() if envvar.startswith('ALEXA_APP_ID_')])
# Toggle request verification via env var; literal_eval parses "True"/"False".
# NOTE(review): the env var name is misspelled ("VERIFICATON") - kept as-is
# for backwards compatibility with existing deployments.
ALEXA_REQUEST_VERIFICATON = ast.literal_eval(os.environ.get('ALEXA_REQUEST_VERIFICATON', 'True'))
def validate_reponse_limit(value):
    """
    value - response content

    Raises InternalError when the encoded response exceeds the
    24 kilobyte limit imposed by the Alexa Skills Kit.
    """
    # BUG FIX: the Alexa Skills Kit rejects responses larger than 24 KB,
    # but the previous check compared against 1000 * 1000 * 24 (24 MB),
    # matching neither the error message nor Amazon's limit.
    if len(value.encode('utf-8')) > 1024 * 24:
        msg = "Alexa response content is bigger then 24 kilobytes: {0}".format(value)
        raise InternalError(msg)
def validate_app_ids(value):
    """
    value - an alexa app id

    Raises InternalError when the id is not one of the configured
    ALEXA_APP_IDS for this service.
    """
    if value in ALEXA_APP_IDS:
        return
    msg = "{0} is not one of the valid alexa skills application ids for this service".format(value)
    raise InternalError(msg)
def validate_current_timestamp(value):
    """
    value - a timestamp formatted in ISO 8601 (for example, 2015-05-13T12:34:56Z).

    Returns True when the timestamp is within 150 seconds of the current
    time (in either direction), False otherwise.
    """
    import logging
    from datetime import timezone
    logger = logging.getLogger(__name__)
    timestamp = datetime.strptime(value, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc)
    utc_now = datetime.now(timezone.utc)
    # BUG FIX: the original computed ``timestamp - now`` without abs(), so
    # any timestamp in the past - however old - validated successfully,
    # allowing replayed requests. Amazon requires rejecting requests whose
    # timestamp differs from the current time by more than 150 seconds.
    delta = abs(timestamp - utc_now)
    logger.debug("Alexa: %s Server: %s Delta: %s", timestamp, utc_now, delta)
    return delta <= timedelta(minutes=2, seconds=30)
def validate_char_limit(value):
    """
    value - a serializer payload to check against the character limit

    Raises InternalError when the JSON-serialized payload exceeds 8000
    characters.
    """
    data = json.dumps(value)
    if len(data) > 8000:
        # BUG FIX: the format string used {1} with a single positional
        # argument, so exceeding the limit raised IndexError instead of
        # the intended InternalError.
        msg = "exceeded the total character limit of 8000: {0}".format(data)
        raise InternalError(msg)
def verify_cert_url(cert_url):
    """
    Verify the URL location of the certificate.

    Returns True only for an https URL on s3.amazonaws.com whose
    normalised path lives under /echo.api/.
    """
    if cert_url is None:
        return False
    parsed = urlparse(cert_url)
    return (parsed.scheme == 'https'
            and parsed.hostname == "s3.amazonaws.com"
            and os.path.normpath(parsed.path).startswith("/echo.api/"))
def verify_signature(request_body, signature, cert_url):
    """
    Verify the request signature is valid.
    """
    if signature is None or cert_url is None:
        return False
    # Fetch Amazon's signing certificate (PEM) from the given URL.
    cert_str = requests.get(cert_url)
    certificate = crypto.load_certificate(crypto.FILETYPE_PEM, str(cert_str.text))
    if certificate.has_expired() is True:
        return False
    # The certificate must be issued to Amazon's echo API domain.
    if certificate.get_subject().CN != "echo-api.amazon.com":
        return False
    decoded_signature = base64.b64decode(signature)
    try:
        # crypto.verify returns None on success and raises on mismatch.
        if crypto.verify(certificate, decoded_signature, request_body, 'sha1') is None:
            return True
    except:
        # NOTE(review): the bare except converts *any* failure (including
        # programming errors) into an InternalError - consider narrowing
        # to crypto.Error.
        raise InternalError("Error occured during signature validation")
    return False
def validate_alexa_request(request_headers, request_body):
    """
    Validates this is a valid alexa request

    request_headers - mapping of (Django-style) request headers
    request_body - raw JSON request body

    Raises InternalError on any failed check; verification can be
    disabled globally via the ALEXA_REQUEST_VERIFICATON env toggle.
    """
    if ALEXA_REQUEST_VERIFICATON is True:
        # Reject stale/future-dated requests.
        timestamp = json.loads(request_body)['request']['timestamp']
        if validate_current_timestamp(timestamp) is False:
            raise InternalError("Invalid Request Timestamp")
        # Reject certificates hosted anywhere but Amazon's S3 location.
        if verify_cert_url(request_headers.get('HTTP_SIGNATURECERTCHAINURL')) is False:
            raise InternalError("Invalid Certificate Chain URL")
        # Reject bodies whose signature does not match the certificate.
        if verify_signature(request_body, request_headers.get('HTTP_SIGNATURE'), request_headers.get('HTTP_SIGNATURECERTCHAINURL')) is False:
            raise InternalError("Invalid Request Signature")
| rishikapadia/echoyumi | django_alexa/internal/validation.py | Python | gpl-3.0 | 3,833 |
def prepare_test_post_data(test, resource):
    """Build POST data for *resource*, applying its optional testdata hook.

    Falls back to the raw ``get_test_post_data`` result when the resource
    has no ``_meta.testdata.setup_post`` hook, the hook fails, or the hook
    returns a falsy value.
    """
    post_data = resource.get_test_post_data()
    try:
        post_data = resource._meta.testdata.setup_post(test, post_data) or post_data
    except Exception:
        # Best-effort hook; a bare ``except:`` here would also swallow
        # SystemExit and KeyboardInterrupt, so catch Exception instead.
        pass
    return post_data
| mozilla/inventory | vendor-local/src/django-tastytools/tastytools/test/definitions/helpers.py | Python | bsd-3-clause | 232 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django import shortcuts
from django.conf import settings
from django.utils.translation import ugettext as _
import horizon
from horizon import api
from horizon import exceptions
from horizon import users
from horizon.base import Horizon
from horizon.views.auth_forms import Login, LoginWithTenant, _set_session_data
LOG = logging.getLogger(__name__)
def user_home(request):
    """ Reversible named view to direct a user to the appropriate homepage. """
    # Target resolution is delegated to the horizon site object.
    return shortcuts.redirect(horizon.get_user_home(request.user))
def login(request):
    """
    Logs in a user and redirects them to the URL specified by
    :func:`horizon.get_user_home`.
    """
    # Already authenticated: skip the login form entirely.
    if request.user.is_authenticated():
        user = users.User(users.get_user_from_request(request))
        return shortcuts.redirect(Horizon.get_user_home(user))
    # maybe_handle returns (form, response); a non-None response means
    # the form was submitted and processed.
    form, handled = Login.maybe_handle(request)
    if handled:
        return handled
    # FIXME(gabriel): we don't ship a template named splash.html
    return shortcuts.render(request, 'splash.html', {'form': form})
def switch_tenants(request, tenant_id):
    """
    Swaps a user from one tenant to another using the unscoped token from
    Keystone to exchange scoped tokens for the new tenant.
    """
    form, handled = LoginWithTenant.maybe_handle(
        request, initial={'tenant': tenant_id,
                          'username': request.user.username})
    if handled:
        return handled
    unscoped_token = request.session.get('unscoped_token', None)
    if unscoped_token:
        try:
            # Exchange the unscoped token for one scoped to the target tenant.
            token = api.token_create_scoped(request,
                                            tenant_id,
                                            unscoped_token)
            _set_session_data(request, token)
            user = users.User(users.get_user_from_request(request))
            return shortcuts.redirect(Horizon.get_user_home(user))
        except Exception, e:
            # NOTE(review): ``e`` is unused; any Keystone failure is
            # surfaced to the user as an authorization problem.
            exceptions.handle(request,
                              _("You are not authorized for that tenant."))
    # Fall through: no unscoped token, or the token exchange failed.
    return shortcuts.redirect("horizon:auth_login")
def logout(request):
    """ Clears the session and logs the current user out. """
    # Dropping the session discards the auth token(s) stored in it.
    request.session.clear()
    # FIXME(gabriel): we don't ship a view named splash
    return shortcuts.redirect('splash')
| rcbops/horizon-buildpackage | horizon/views/auth.py | Python | apache-2.0 | 3,134 |
from .base import Browser, ExecutorBrowser, require_arg
from .base import get_timeout_multiplier # noqa: F401
from ..webdriver_server import ChromeDriverServer
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
WebDriverRefTestExecutor, # noqa: F401
WebDriverCrashtestExecutor) # noqa: F401
from ..executors.executorchrome import ChromeDriverWdspecExecutor # noqa: F401
__wptrunner__ = {"product": "chrome",
"check_args": "check_args",
"browser": "ChromeBrowser",
"executor": {"testharness": "WebDriverTestharnessExecutor",
"reftest": "WebDriverRefTestExecutor",
"wdspec": "ChromeDriverWdspecExecutor",
"crashtest": "WebDriverCrashtestExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"timeout_multiplier": "get_timeout_multiplier",}
def check_args(**kwargs):
    """Fail fast when the mandatory --webdriver-binary argument is missing."""
    require_arg(kwargs, "webdriver_binary")
def browser_kwargs(test_type, run_info_data, config, **kwargs):
    """Build the keyword arguments used to construct a ChromeBrowser."""
    browser_args = {"binary": kwargs["binary"]}
    browser_args["webdriver_binary"] = kwargs["webdriver_binary"]
    # webdriver_args is optional; default to None when absent.
    browser_args["webdriver_args"] = kwargs.get("webdriver_args")
    return browser_args
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                    **kwargs):
    """Assemble the kwargs dict handed to the WebDriver-based executors,
    including the Chrome-specific capabilities/flags."""
    executor_kwargs = base_executor_kwargs(test_type, server_config,
                                           cache_manager, run_info_data,
                                           **kwargs)
    executor_kwargs["close_after_done"] = True
    executor_kwargs["supports_eager_pageload"] = False
    capabilities = {
        "goog:chromeOptions": {
            "prefs": {
                "profile": {
                    "default_content_setting_values": {
                        "popups": 1
                    }
                }
            },
            "useAutomationExtension": False,
            "excludeSwitches": ["enable-automation"],
            "w3c": True
        }
    }
    if test_type == "testharness":
        capabilities["pageLoadStrategy"] = "none"
    chrome_options = capabilities["goog:chromeOptions"]
    if kwargs["binary"] is not None:
        chrome_options["binary"] = kwargs["binary"]
    # Here we set a few Chrome flags that are always passed.
    # ChromeDriver's "acceptInsecureCerts" capability only controls the current
    # browsing context, whereas the CLI flag works for workers, too.
    chrome_options["args"] = ["--ignore-certificate-errors"]
    # Allow audio autoplay without a user gesture.
    chrome_options["args"].append("--autoplay-policy=no-user-gesture-required")
    # Allow WebRTC tests to call getUserMedia.
    chrome_options["args"].append("--use-fake-ui-for-media-stream")
    chrome_options["args"].append("--use-fake-device-for-media-stream")
    # Shorten delay for Reporting <https://w3c.github.io/reporting/>.
    chrome_options["args"].append("--short-reporting-delay")
    # Point all .test domains to localhost for Chrome
    chrome_options["args"].append("--host-resolver-rules=MAP nonexistent.*.test ~NOTFOUND, MAP *.test 127.0.0.1")
    # Copy over any other flags that were passed in via --binary_args
    if kwargs["binary_args"] is not None:
        chrome_options["args"].extend(kwargs["binary_args"])
    # Pass the --headless flag to Chrome if WPT's own --headless flag was set
    if kwargs["headless"] and "--headless" not in chrome_options["args"]:
        chrome_options["args"].append("--headless")
    executor_kwargs["capabilities"] = capabilities
    return executor_kwargs
def env_extras(**kwargs):
    """Chrome requires no extra environment managers."""
    return []
def env_options():
    """Server options for Chrome: bind test servers to 127.0.0.1."""
    return {"server_host": "127.0.0.1"}
class ChromeBrowser(Browser):
    """Chrome is backed by chromedriver, which is supplied through
    ``wptrunner.webdriver.ChromeDriverServer``.
    """
    def __init__(self, logger, binary, webdriver_binary="chromedriver",
                 webdriver_args=None):
        """Creates a new representation of Chrome. The `binary` argument gives
        the browser binary to use for testing."""
        Browser.__init__(self, logger)
        self.binary = binary
        self.server = ChromeDriverServer(self.logger,
                                         binary=webdriver_binary,
                                         args=webdriver_args)
    def start(self, **kwargs):
        # Launch chromedriver without blocking until it is ready.
        self.server.start(block=False)
    def stop(self, force=False):
        # Shut down chromedriver; ``force`` kills the process if needed.
        self.server.stop(force=force)
    def pid(self):
        # Process id of the chromedriver server process.
        return self.server.pid
    def is_alive(self):
        # TODO(ato): This only indicates the driver is alive,
        # and doesn't say anything about whether a browser session
        # is active.
        return self.server.is_alive
    def cleanup(self):
        self.stop()
    def executor_browser(self):
        # Executors talk to the browser through chromedriver's HTTP endpoint.
        return ExecutorBrowser, {"webdriver_url": self.server.url}
| DominoTree/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/browsers/chrome.py | Python | mpl-2.0 | 5,222 |
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # Serve the bare base template at the site root.
    url(r'^$', TemplateView.as_view(template_name='base.html')),
    # Examples:
    # url(r'^$', '{{ project_name }}.views.home', name='home'),
    # url(r'^{{ project_name }}/', include('{{ project_name }}.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
| archen/django-project | project_name/project_name/urls.py | Python | mit | 698 |
'''
file clean_dvi.py
This file is part of LyX, the document processor.
Licence details can be found in the file COPYING
or at http://www.lyx.org/about/licence.php
author Angus Leeming
Full author contact details are available in the file CREDITS
or at http://www.lyx.org/about/credits.php
Usage:
python clean_dvi.py infile.dvi outfile.dvi
clean_dvi modifies the input .dvi file so that
dvips and yap (a dvi viewer on Windows) can find
any embedded PostScript files whose names are protected
with "-quotes.
It works by:
1 translating the machine readable .dvi file to human
readable .dtl form,
2 manipulating any references to external files
3 translating the .dtl file back to .dvi format.
It requires dv2dt and dt2dv from the DTL dviware package
http://www.ctan.org/tex-archive/dviware/dtl/
'''
import os, re, subprocess, sys
def usage(prog_name):
    """Return the one-line usage string, naming only the program's basename."""
    prog = os.path.basename(prog_name)
    return 'Usage: %s in.dvi out.dvi\n' % prog
def warning(message):
    """Emit a non-fatal diagnostic on stderr with a trailing newline."""
    text = message + '\n'
    sys.stderr.write(text)
def error(message):
    """Report *message* on stderr and abort with exit status 1."""
    line = message + '\n'
    sys.stderr.write(line)
    # Equivalent to sys.exit(1): unwinds via SystemExit.
    raise SystemExit(1)
def manipulated_dtl(data):
    """Strip protective double quotes from PSfile names in DTL *data*.

    Each ``special1 ... 'PSfile="..."'`` line has the quotes removed from
    the embedded file name, and the special's byte-length field is
    reduced by the number of characters removed. All other lines pass
    through unchanged.
    """
    psfile_re = re.compile(r'(special1 +)([0-9]+)( +\'PSfile=")(.*)(" llx=.*)')
    result = []
    for line in data.split('\n'):
        match = psfile_re.match(line)
        if match is None:
            result.append(line)
            continue
        prefix, strlen, opener, name, suffix = match.groups()
        cleaned = name.replace('"', '')
        # Shrink the recorded length by the number of quotes dropped.
        newlen = int(strlen) - (len(name) - len(cleaned))
        result.append('%s%d%s%s%s' % (prefix, newlen, opener, cleaned, suffix))
    return '\n'.join(result)
def main(argv):
    """Convert in.dvi -> DTL, fix quoted PSfile names, write out.dvi."""
    # First establish that the expected information has
    # been input on the command line and whether the
    # required executables exist.
    if len(argv) != 3:
        error(usage(argv[0]))
    infile = argv[1]
    outfile = argv[2]
    if not os.path.exists(infile):
        error('Unable to read "%s"\n' % infile)
    # Convert the input .dvi file to .dtl format.
    if os.name == 'nt':
        unix = False
    else:
        unix = True
    dv2dt_call = 'dv2dt "%s"' % infile
    dv2dt_pipe = subprocess.Popen(dv2dt_call, universal_newlines=True, \
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, \
        shell=unix, close_fds=unix)
    (dv2dt_stdin, dv2dt_stdout, dv2dt_stderr) = \
        (dv2dt_pipe.stdin, dv2dt_pipe.stdout, dv2dt_pipe.stderr)
    dv2dt_stdin.close()
    dv2dt_data = dv2dt_stdout.read()
    dv2dt_status = dv2dt_stdout.close()
    if dv2dt_status != None or len(dv2dt_data) == 0:
        dv2dt_err = dv2dt_stderr.read()
        error("Failed: %s\n%s\n" % ( dv2dt_call, dv2dt_err) )
    # Manipulate the .dtl file.
    dtl_data = manipulated_dtl(dv2dt_data)
    if dtl_data == None:
        error("Failed to manipulate the dtl file")
    # Convert this .dtl file back to .dvi format.
    dt2dv_call = 'dt2dv -si "%s"' % outfile
    dt2dv_stdin = os.popen(dt2dv_call, 'w')
    # NOTE(review): the dt2dv pipe is never closed explicitly, so dt2dv
    # failures go unreported; the pipe is flushed at interpreter exit.
    dt2dv_stdin.write(dtl_data)
if __name__ == "__main__":
    main(sys.argv)
| bpiwowar/lyx | lib/scripts/clean_dvi.py | Python | gpl-2.0 | 3,211 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
    # Make sure python thinks it can write unicode to its stdout
    "\u2713".encode("utf_8").decode(sys.stdout.encoding)
    TICK = "✓ "
    CROSS = "✖ "
    CIRCLE = "○ "
except UnicodeDecodeError:
    # Fall back to plain-ASCII status markers.
    # NOTE(review): when sys.stdout.encoding is None (piped output) the
    # decode call raises TypeError, not UnicodeDecodeError - confirm the
    # intended behaviour in that environment.
    TICK = "P "
    CROSS = "x "
    CIRCLE = "o "
if os.name == 'posix':
    # primitive formatting on supported
    # terminal via ANSI escape sequences:
    BOLD = ('\033[0m', '\033[1m')
    BLUE = ('\033[0m', '\033[0;34m')
    RED = ('\033[0m', '\033[0;31m')
    GREY = ('\033[0m', '\033[1;30m')
# Exit codes the individual test scripts use to report their outcome.
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
# 30 minutes represented in seconds
TRAVIS_TIMEOUT_DURATION = 30 * 60
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_dip3_deterministicmns.py', # NOTE: needs dash_hash to pass
'feature_block_reward_reallocation.py',
'feature_llmq_data_recovery.py',
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'feature_block.py', # NOTE: needs dash_hash to pass
'rpc_fundrawtransaction.py',
'rpc_fundrawtransaction_hd.py',
'wallet_multiwallet.py --usecli',
'p2p_quorum_data.py',
# vv Tests less than 2m vv
'p2p_instantsend.py',
'wallet_basic.py',
'wallet_labels.py',
'wallet_dump.py',
'wallet_listtransactions.py',
'feature_multikeysporks.py',
'feature_llmq_signing.py', # NOTE: needs dash_hash to pass
'feature_llmq_signing.py --spork21', # NOTE: needs dash_hash to pass
'feature_llmq_chainlocks.py', # NOTE: needs dash_hash to pass
'feature_llmq_connections.py', # NOTE: needs dash_hash to pass
'feature_llmq_simplepose.py', # NOTE: needs dash_hash to pass
'feature_llmq_is_cl_conflicts.py', # NOTE: needs dash_hash to pass
'feature_llmq_is_retroactive.py', # NOTE: needs dash_hash to pass
'feature_llmq_dkgerrors.py', # NOTE: needs dash_hash to pass
'feature_dip4_coinbasemerkleroots.py', # NOTE: needs dash_hash to pass
# vv Tests less than 60s vv
'p2p_sendheaders.py', # NOTE: needs dash_hash to pass
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq_dash.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'wallet_txn_clone.py',
'rpc_getchaintips.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'interface_http.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'p2p_disconnect_ban.py',
'feature_addressindex.py',
'feature_timestampindex.py',
'feature_spentindex.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'wallet_keypool_hd.py',
'p2p_mempool.py',
'mining_prioritisetransaction.py',
'p2p_invalid_block.py',
'p2p_invalid_tx.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'rpc_zmq.py',
'rpc_signmessage.py',
'feature_nulldummy.py',
'wallet_import_rescan.py',
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'mining_basic.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'p2p_compactblocks.py',
'p2p_connect_to_devnet.py',
'feature_sporks.py',
'rpc_getblockstats.py',
'wallet_encryption.py',
'wallet_upgradetohd.py',
'feature_dersig.py',
'feature_cltv.py',
'feature_new_quorum_type_activation.py',
'feature_governance_objects.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'feature_minchainwork.py',
'p2p_unrequested_blocks.py', # NOTE: needs dash_hash to pass
'feature_shutdown.py',
'rpc_coinjoin.py',
'rpc_masternode.py',
'rpc_mnauth.py',
'rpc_verifyislock.py',
'rpc_verifychainlock.py',
'p2p_fingerprint.py',
'rpc_platform_filter.py',
'feature_dip0020_activation.py',
'feature_uacomment.py',
'p2p_unrequested_blocks.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_blocksdir.py',
'feature_config_args.py',
'feature_help.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py', # NOTE: Prune mode is incompatible with -txindex, should work with governance validation disabled though.
# vv Tests less than 20m vv
'feature_fee_estimation.py',
# vv Tests less than 5m vv
'feature_maxuploadtarget.py',
'mempool_packages.py',
'feature_dbcrash.py',
# vv Tests less than 2m vv
'feature_bip68_sequence.py',
'mining_getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'p2p_timeouts.py',
# vv Tests less than 60s vv
# vv Tests less than 30s vv
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_txindex.py',
'feature_notifications.py',
'rpc_invalidateblock.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
# Python files in the functional test directory that must not be
# treated as runnable test scripts.
NON_SCRIPTS = [
    # These are python files that live in the functional tests directory, but are not test scripts.
    "combine_logs.py",
    "create_cache.py",
    "test_runner.py",
]
def main():
    """Parse options, build the test list, and dispatch to run_tests().

    Fix: the ``.py`` suffix-normalizing regexes were written as non-raw
    strings (``"\\.py$"``), an invalid escape sequence that raises a
    DeprecationWarning on Python 3.6+ and is slated to become a SyntaxError.
    They are now raw strings; the matched pattern is unchanged.
    """
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(add_help=False,
                                     usage='%(prog)s [test_runner.py options] [script options] [scripts]',
                                     description=__doc__,
                                     epilog='''
Help text and arguments for individual test script:''',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
    parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
    parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
    parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
    parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
    parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
    parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
    parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
    args, unknown_args = parser.parse_known_args()

    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]

    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile, encoding="utf8"))
    passon_args.append("--configfile=%s" % configfile)

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)

    # Create base test directory
    tmpdir = "%s/dash_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
    os.makedirs(tmpdir)
    logging.debug("Temporary test directory at %s" % tmpdir)

    enable_wallet = config["components"].getboolean("ENABLE_WALLET")
    enable_utils = config["components"].getboolean("ENABLE_UTILS")
    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

    if config["environment"]["EXEEXT"] == ".exe" and not args.force:
        # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
        # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
        print("Tests currently disabled on Windows by default. Use --force option to enable")
        sys.exit(0)

    if not (enable_wallet and enable_utils and enable_bitcoind):
        print("No functional tests to run. Wallet, utils, and dashd must all be enabled")
        print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
        sys.exit(0)

    # Build list of tests
    test_list = []
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [re.sub(r"\.py$", "", test) + ".py" for test in tests]
        for test in tests:
            if test in ALL_SCRIPTS:
                test_list.append(test)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
    elif args.extended:
        # Include extended tests
        test_list += ALL_SCRIPTS
    else:
        # Run base tests only
        test_list += BASE_SCRIPTS

    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        exclude_tests = [re.sub(r"\.py$", "", test) + ".py" for test in args.exclude.split(',')]
        for exclude_test in exclude_tests:
            if exclude_test in test_list:
                test_list.remove(exclude_test)
            else:
                print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))

    if not test_list:
        print("No valid test scripts specified. Check that your test is in one "
              "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
        sys.exit(0)

    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
        sys.exit(0)

    check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
    check_script_prefixes()

    if not args.keepcache:
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)

    run_tests(
        test_list=test_list,
        src_dir=config["environment"]["SRCDIR"],
        build_dir=config["environment"]["BUILDDIR"],
        tmpdir=tmpdir,
        jobs=args.jobs,
        enable_coverage=args.coverage,
        args=passon_args,
        failfast=args.failfast,
        runs_ci=args.ci,
        combined_logs_len=args.combinedlogslen,
    )
def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, failfast=False, runs_ci, combined_logs_len=0):
    """Run every script in test_list through a TestHandler job queue.

    Prints per-test progress/failure output, a final results table, and
    exits the process with status 1 if any test failed. `args` are extra
    flags passed through to each test script; `runs_ci` enables per-test
    timeouts; `combined_logs_len` > 0 dumps the tail of the combined node
    logs on failure.
    """
    args = args or []

    # Warn if dashd is already running (unix only)
    try:
        pidof_output = subprocess.check_output(["pidof", "dashd"])
        if not (pidof_output is None or pidof_output == b''):
            print("%sWARNING!%s There is already a dashd process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        # "pidof" missing or failed: best-effort check only, ignore.
        pass

    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))

    tests_dir = src_dir + '/test/functional/'

    flags = ["--srcdir={}/src".format(build_dir)] + args
    flags.append("--cachedir=%s" % cache_dir)

    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None

    if len(test_list) > 1 and jobs > 1:
        # Populate cache once up front so parallel tests don't race to build it.
        try:
            subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
        except subprocess.CalledProcessError as e:
            sys.stdout.buffer.write(e.output)
            raise

    # Run Tests
    job_queue = TestHandler(
        num_tests_parallel=jobs,
        tests_dir=tests_dir,
        tmpdir=tmpdir,
        test_list=test_list,
        flags=flags,
        timeout_duration=TRAVIS_TIMEOUT_DURATION if runs_ci else float('inf'),  # in seconds
    )
    start_time = time.time()
    test_results = []

    max_len_name = len(max(test_list, key=len))

    for _ in range(len(test_list)):
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)

        if test_result.status == "Passed":
            logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
        else:
            print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
            if combined_logs_len and os.path.isdir(testdir):
                # Print the final `combinedlogslen` lines of the combined logs
                print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
                print('\n============')
                print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
                print('============\n')
                combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
                print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))

            if failfast:
                logging.debug("Early exiting after test failure")
                break

    print_results(test_results, max_len_name, (int(time.time() - start_time)))

    if coverage:
        coverage.report_rpc_coverage()
        logging.debug("Cleaning up coverage data")
        coverage.cleanup()

    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)

    all_passed = all(map(lambda test_result: test_result.was_successful, test_results))

    # This will be a no-op unless failfast is True in which case there may be dangling
    # processes which need to be killed.
    job_queue.kill_and_join()

    sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
    """Print the sorted per-test table followed by an aggregate footer row."""
    header = "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS  ", "DURATION")
    results = "\n" + BOLD[1] + header + BOLD[0]

    test_results.sort(key=TestResult.sort_key)

    # Pad every row to the widest test name, then render them in sorted order.
    for result in test_results:
        result.padding = max_len_name
    all_passed = all(result.was_successful for result in test_results)
    time_sum = sum(result.time for result in test_results)
    results += "".join(str(result) for result in test_results)

    status = TICK + "Passed" if all_passed else CROSS + "Failed"
    footer = BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
    if all_passed:
        results += footer
    else:
        # Highlight the aggregate row in red when anything failed.
        results += RED[1] + footer + RED[0]
    results += "Runtime: %s s\n" % (runtime)
    print(results)
class TestHandler:
    """
    Trigger the test scripts passed in via the list.

    Maintains up to `num_tests_parallel` concurrently running test
    subprocesses; get_next() tops up the pool and blocks until one of
    them finishes.
    """

    def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, timeout_duration):
        assert num_tests_parallel >= 1
        self.num_jobs = num_tests_parallel
        self.tests_dir = tests_dir
        self.tmpdir = tmpdir
        self.timeout_duration = timeout_duration
        self.test_list = test_list
        self.flags = flags
        self.num_running = 0
        # In case there is a graveyard of zombie dashds, we can apply a
        # pseudorandom offset to hopefully jump over them.
        # (625 is PORT_RANGE/MAX_NODES)
        self.portseed_offset = int(time.time() * 1000) % 625
        # Each job is a tuple: (test name, start time, Popen, testdir, stdout file, stderr file).
        self.jobs = []

    def get_next(self):
        """Start tests until the pool is full, then block until one finishes.

        Returns (TestResult, testdir, stdout, stderr) for the finished test.
        Raises IndexError when called with no tests left to run or running.
        """
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            test = self.test_list.pop(0)
            # Unique per-job port seed; remaining list length keeps seeds distinct.
            portseed = len(self.test_list) + self.portseed_offset
            portseed_arg = ["--portseed={}".format(portseed)]
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
            test_argv = test.split()
            testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
            tmpdir_arg = ["--tmpdir={}".format(testdir)]
            self.jobs.append((test,
                              time.time(),
                              subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                                               universal_newlines=True,
                                               stdout=log_stdout,
                                               stderr=log_stderr),
                              testdir,
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')
        while True:
            # Return first proc that finishes
            time.sleep(.5)
            for job in self.jobs:
                (name, start_time, proc, testdir, log_out, log_err) = job
                if int(time.time() - start_time) > self.timeout_duration:
                    # In travis, timeout individual tests (to stop tests hanging and not providing useful output).
                    proc.send_signal(signal.SIGINT)
                if proc.poll() is not None:
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    # Any output on stderr fails the test even with a passing exit code.
                    if proc.returncode == TEST_EXIT_PASSED and stderr == "":
                        status = "Passed"
                    elif proc.returncode == TEST_EXIT_SKIPPED:
                        status = "Skipped"
                    else:
                        status = "Failed"
                    self.num_running -= 1
                    self.jobs.remove(job)
                    return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
            # Progress indicator while polling.
            print('.', end='', flush=True)

    def kill_and_join(self):
        """Send SIGKILL to all jobs and block until all have ended."""
        procs = [i[2] for i in self.jobs]

        for proc in procs:
            proc.kill()

        for proc in procs:
            proc.wait()
class TestResult():
    """Outcome of one test script: name, status ("Passed" / "Skipped" /
    "Failed"), duration in seconds, and the column padding used when the
    result is rendered as a table row."""

    def __init__(self, name, status, time):
        self.name = name
        self.status = status
        self.time = time
        self.padding = 0

    def sort_key(self):
        # Table ordering: passed first (0), skipped in the middle (1),
        # failed last (2); alphabetical within each group.
        if self.status == "Passed":
            return 0, self.name.lower()
        elif self.status == "Failed":
            return 2, self.name.lower()
        elif self.status == "Skipped":
            return 1, self.name.lower()

    def __repr__(self):
        # Render one colorized table row; BOLD/BLUE/RED/GREY and the glyphs
        # are module-level constants defined elsewhere in this file.
        if self.status == "Passed":
            color = BLUE
            glyph = TICK
        elif self.status == "Failed":
            color = RED
            glyph = CROSS
        elif self.status == "Skipped":
            color = GREY
            glyph = CIRCLE

        return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]

    @property
    def was_successful(self):
        # Skipped counts as success; only "Failed" fails the run.
        return self.status != "Failed"
def check_script_prefixes():
    """Check that test scripts start with one of the allowed name prefixes."""
    allowed_prefix = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
    offenders = [script for script in ALL_SCRIPTS if not allowed_prefix.match(script)]
    if offenders:
        print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(offenders)))
        print("  %s" % ("\n  ".join(sorted(offenders))))
        raise AssertionError("Some tests are not following naming convention!")
def check_script_list(*, src_dir, fail_on_warn):
    """Check scripts directory.

    Check that there are no scripts in the functional tests directory which are
    not being run by pull-tester.py."""
    script_dir = src_dir + '/test/functional/'
    on_disk = {test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")}
    # Strip per-script arguments (e.g. "foo.py --mineblock") before comparing.
    registered = {entry.split()[0] for entry in ALL_SCRIPTS + NON_SCRIPTS}
    missed_tests = list(on_disk - registered)
    if missed_tests:
        print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
        if fail_on_warn:
            # On travis this warning is an error to prevent merging incomplete commits into master
            sys.exit(1)
class RPCCoverage():
    """
    Coverage reporting utilities for test_runner.

    Each test-script subprocess writes the RPC commands it invoked into files
    under a shared temporary directory, alongside a reference listing of all
    RPC commands per `dash-cli help` (`rpc_interface.txt`). After the run,
    the union of the per-test files is diffed against the reference list to
    find RPC commands that were never exercised.

    See also: test/functional/test_framework/coverage.py
    """

    def __init__(self):
        # Working directory handed to each test via the --coveragedir flag.
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir=%s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()
        if not uncovered:
            print("All RPC commands covered.")
            return
        print("Uncovered RPC commands:")
        print("".join((" - %s\n" % command) for command in sorted(uncovered)))

    def cleanup(self):
        """Remove the coverage working directory and everything beneath it."""
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `test/functional/test-framework/coverage.py`
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'

        reference_path = os.path.join(self.dir, reference_filename)
        if not os.path.isfile(reference_path):
            raise RuntimeError("No coverage reference found")

        with open(reference_path, 'r', encoding="utf8") as reference_file:
            all_cmds = {line.strip() for line in reference_file.readlines()}

        covered_cmds = set()
        for root, _, files in os.walk(self.dir):
            for filename in files:
                if filename.startswith(coverage_file_prefix):
                    with open(os.path.join(root, filename), 'r', encoding="utf8") as coverage_file:
                        covered_cmds.update(line.strip() for line in coverage_file.readlines())

        return all_cmds - covered_cmds
# Script entry point when invoked directly (not imported).
if __name__ == '__main__':
    main()
| dashpay/dash | test/functional/test_runner.py | Python | mit | 26,250 |
# Copyright 2008 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
# Version tuple: (major, minor, build, *extra tags such as alpha/beta markers).
__version__ = (2, 5, 5)


def versionstring(build=True, extra=True):
    """Returns the version number of Whoosh as a string.

    :param build: Whether to include the build number in the string.
    :param extra: Whether to include alpha/beta/rc etc. tags. Only
        checked if build is True.
    :rtype: str
    """
    count = 3 if build else 2
    version = ".".join(str(part) for part in __version__[:count])
    if build and extra:
        # Extra tags (index 3 onward) are appended with no separating dots.
        version += "".join(str(tag) for tag in __version__[3:])
    return version
| SebastianMerz/calalert | Server/venv/lib/python2.7/site-packages/whoosh/__init__.py | Python | gpl-2.0 | 2,059 |
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest
from c7n.executor import MainThreadExecutor
from c7n.resources.awslambda import AWSLambda
class LambdaTest(BaseTest):
    """Functional tests for the `lambda` resource using recorded AWS flight
    data: the delete action and the event-source / security-group filters."""

    def test_delete(self):
        # Policy matches a function by name, deletes it, and the API then
        # reports no remaining functions.
        factory = self.replay_flight_data('test_aws_lambda_delete')
        p = self.load_policy({
            'name': 'lambda-events',
            'resource': 'lambda',
            'filters': [
                {'FunctionName': 'superduper'}],
            'actions': [{'type': 'delete'}]
        }, session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['FunctionName'], 'superduper')
        client = factory().client('lambda')
        self.assertEqual(client.list_functions()['Functions'], [])

    def test_event_source(self):
        # The event-source filter annotates matched resources with
        # 'c7n:EventSources'; 'not-null' matches any configured source.
        factory = self.replay_flight_data('test_aws_lambda_source')
        p = self.load_policy({
            'name': 'lambda-events',
            'resource': 'lambda',
            'filters': [
                {'type': 'event-source',
                 'key': '',
                 'value': 'not-null'}]},
            session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 2)
        self.assertEqual(
            {r['c7n:EventSources'][0] for r in resources},
            set(['iot.amazonaws.com']))

    def test_sg_filter(self):
        # The security-group filter records the matching group ids under
        # 'c7n:matched-security-groups'.
        factory = self.replay_flight_data('test_aws_lambda_sg')
        p = self.load_policy({
            'name': 'sg-lambda',
            'resource': 'lambda',
            'filters': [
                {'FunctionName': 'mys3'},
                {'type': 'security-group',
                 'key': 'GroupName',
                 'value': 'default'}
            ]}, session_factory=factory)
        resources = p.run()
        self.assertEqual(resources[0]['FunctionName'], 'mys3')
        self.assertEqual(
            resources[0]['c7n:matched-security-groups'],
            ['sg-f9cc4d9f'])
class LambdaTagTest(BaseTest):
    """Functional tests for lambda tagging: tag / remove-tag actions, tag
    filters, and mark-for-op / marked-for-op round trips."""

    def test_lambda_tag_and_remove(self):
        # Run in the main thread so the recorded flight data replays deterministically.
        self.patch(AWSLambda, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_lambda_tag_and_remove')
        client = session_factory().client('lambda')

        # Apply a tag, then verify it via the API.
        policy = self.load_policy({
            'name': 'lambda-tag',
            'resource': 'lambda',
            'filters': [
                {'FunctionName': 'CloudCustodian'}],
            'actions': [
                {'type': 'tag', 'key': 'xyz', 'value': 'abcdef'}]
            },
            session_factory=session_factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)

        arn = resources[0]['FunctionArn']
        tags = client.list_tags(Resource=arn)['Tags']
        self.assertTrue('xyz' in tags.keys())

        # Remove the same tag, then verify it is gone.
        policy = self.load_policy({
            'name': 'lambda-tag',
            'resource': 'lambda',
            'filters': [
                {'FunctionName': 'CloudCustodian'}],
            'actions': [
                {'type': 'remove-tag', 'tags': ['xyz']}]
            },
            session_factory=session_factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)

        arn = resources[0]['FunctionArn']
        tags = client.list_tags(Resource=arn)['Tags']
        self.assertFalse('xyz' in tags.keys())

    def test_lambda_tags(self):
        # Filter resources by an existing tag value.
        self.patch(AWSLambda, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data(
            'test_lambda_tags')
        policy = self.load_policy({
            'name': 'lambda-mark',
            'resource': 'lambda',
            'filters': [{"tag:Language": "Python"}]},
            session_factory=session_factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)

    def test_mark_and_match(self):
        session_factory = self.replay_flight_data(
            'test_lambda_mark_and_match')
        client = session_factory().client('lambda')

        # Mark the function for a delayed delete via the 'custodian_next' tag.
        policy = self.load_policy({
            'name': 'lambda-mark',
            'resource': 'lambda',
            'filters': [{"FunctionName": 'CloudCustodian'}],
            'actions': [{
                'type': 'mark-for-op', 'op': 'delete',
                'tag': 'custodian_next', 'days': 1}]},
            session_factory=session_factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)

        arn = resources[0]['FunctionArn']
        tags = client.list_tags(Resource=arn)['Tags']
        self.assertTrue('custodian_next' in tags.keys())

        # The marked-for-op filter should find the function marked above.
        policy = self.load_policy({
            'name': 'lambda-mark-filter',
            'resource': 'lambda',
            'filters': [
                {'type': 'marked-for-op', 'tag': 'custodian_next',
                 'op': 'delete'}]},
            session_factory=session_factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)
| VeritasOS/cloud-custodian | tests/test_lambda.py | Python | apache-2.0 | 5,594 |
"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
    """Return a truthy match object when *stat* names a quantile q0-q99,
    otherwise None. ("q100" deliberately does not match; it is treated as
    "max" elsewhere.)"""
    return re.match(r"q[0-9][0-9]?$", stat)
def is_stat(stat):
    """argparse type validator: return *stat* unchanged when it names a
    supported statistic, otherwise raise ArgumentTypeError."""
    named_stats = ("mean", "variance", "min", "max", "q100")
    if stat in named_stats or is_quantile(stat):
        return stat
    raise argparse.ArgumentTypeError(
            "Invalid statistic {0}".format(stat))
def get_args():
    """Build the argument parser and return the parsed command-line args.

    Positional arguments are three open file handles (data,
    parameterizations, parameters); optional flags select the statistics,
    grouping columns, grid deltas, and output directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("data",
                        type=argparse.FileType("r"),
                        help="data file to be summarized."
                             "Should have columns seed, "\
                             "set, and metrics columns.")
    parser.add_argument("parameterizations",
                        type=argparse.FileType("r"),
                        help="file containing parameter"\
                             "izations. Number of param"\
                             "eterizations should be the "\
                             "same as number of rows per "\
                             "seed in the data file."
                        )
    parser.add_argument("parameters",
                        type=argparse.FileType("r"),
                        help="file describing parameters. "\
                             "Should have as many rows as "\
                             "parameterizations file has "\
                             "columns."
                        )
    # Default statistics computed when -s is not given.
    stats = ["mean", "variance", "q10", "q50", "q90"]
    parser.add_argument("-s", "--stats", nargs="+",
                        default = stats, type = is_stat,
                        help="statistics to compute")
    parser.add_argument("-g", "--group", nargs="+",
                        help="parameters by which to "\
                             "group. Names should be "\
                             "found in the parameters "\
                             "file. "
                        )
    parser.add_argument("-d", "--deltas",
                        help="If group is specified, "\
                             "deltas may be used to impose "\
                             "grid boxes on the summary "\
                             "rather than using point "\
                             "values.",
                        nargs="+", type = float
                        )
    parser.add_argument("-o", "--output-directory",
                        default="/gpfs/scratch/mjw5407/"
                                "task1/stats/"
                        )
    return parser.parse_args()
def compute(data, stat):
    """Apply the named statistic to *data* (a pandas object or groupby).

    Quantile names "q0".."q99" map to fractions; "q0" is treated as min
    and "q100" as max.
    """
    if stat == "mean":
        return data.mean()
    if stat == "variance":
        return data.var()
    if is_quantile(stat):
        fraction = float(stat[1:]) / 100.0
        return data.min() if fraction == 0.0 else data.quantile(fraction)
    if stat in ("max", "q100"):
        return data.max()
    if stat == "min":
        return data.min()
# NOTE: this module is Python 2 (statement-form print below).
def analyze(data, stats, group=None, deltas=None):
    """Compute each statistic in *stats* on *data* grouped by *group*.

    When *deltas* is given, each grouping column is bucketed into grid
    boxes of that width (a new "grid_<name>" column) before grouping.
    Returns a list of (tag, result) pairs, one per statistic.
    """
    results = []
    if group is None:
        group = ["Set"]
    togroupby = copy.copy(group)
    ii = 0
    if deltas is None:
        togroupby = group
    else:
        while ii < len(group) and ii < len(deltas):
            # Snap each value down to the start of its grid box of width deltas[ii].
            colname = "grid_{0}".format(group[ii])
            gridnumbers = numpy.floor(data[group[ii]].apply(
                              lambda val: val / deltas[ii]))
            data[colname] = gridnumbers.apply(
                                lambda val: val * deltas[ii])
            togroupby[ii] = colname
            ii += 1
    print "analyzing grouped by {0}".format(group)
    gb = data.groupby(togroupby)
    for stat in stats:
        print "computing {0}".format(stat)
        tag = "{0}_{1}".format("_".join(group), stat)
        results.append((tag, compute(gb, stat)))
    return results
def write_result(infn, result, outputdir):
    """Write one (tag, DataFrame) result as a space-separated file in
    *outputdir*, named "<tag>_<input basename>" with any ".hv" suffix
    stripped.

    Fixes: the suffix regex is now a raw string (the non-raw "\\.hv$" is an
    invalid escape sequence, deprecated on Python 3), and print uses the
    parenthesized form, which behaves identically on Python 2 and 3.
    """
    fn = "_".join([result[0], os.path.basename(infn)])
    fn = re.sub(r"\.hv$", "", fn)
    fn = os.path.join(outputdir, fn)
    print("writing {0}".format(fn))
    result[1].to_csv(fn, sep=" ", index=True)
def cli():
    """Entry point: read the data, parameterizations, and parameters files,
    join parameterizations onto the data by Set index, compute the requested
    grouped statistics, and write one output file per statistic."""
    args = get_args()
    data = pandas.read_table(args.data, sep=" ")
    parameters = pandas.read_table(
                    args.parameters, sep=" ",
                    names=["name","low","high"],
                    header=None)
    param_names = parameters["name"].values
    parameterizations = pandas.read_table(
                            args.parameterizations,
                            sep=" ",
                            names = param_names,
                            header = None)
    # "Set" values index rows of the parameterizations table.
    data = data.join(parameterizations, on=["Set"],
                     how="outer")
    if args.deltas is not None:
        deltas = args.deltas
    else:
        deltas = []
    results = analyze(data, args.stats, args.group, deltas)
    for result in results:
        write_result(args.data.name, result,
                     args.output_directory)
# Script entry point when invoked directly (not imported).
if __name__ == "__main__":
    cli()

# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
| matthewjwoodruff/moeasensitivity | statistics/statistics.py | Python | lgpl-3.0 | 6,225 |
#
#
import sys, os
from pymongo import Connection
## -----------------------------
## FUNCTIONS -------------------
## -----------------------------
# Convert unicode2utf8 dicts
# Convert unicode2utf8 dicts
def convertUnicode2Utf8Dict(data):
    """Recursively convert every unicode string inside *data* (scalars,
    mappings, iterables) to a byte string via str().

    NOTE: Python 2 only — relies on `basestring` and `dict.iteritems`.
    """
    import collections
    if isinstance(data, basestring):
        return str(data)
    elif isinstance(data, collections.Mapping):
        # Rebuild the mapping with converted keys and values.
        return dict(map(convertUnicode2Utf8Dict, data.iteritems()))
    elif isinstance(data, collections.Iterable):
        # Preserve the container type (list, tuple, ...).
        return type(data)(map(convertUnicode2Utf8Dict, data))
    else:
        return data
#
# Mongo DB Connect
#
# NOTE(review): Connection() uses pymongo's defaults (localhost:27017) —
# the target host is not configurable here.
xmoduledb = "edxapp"
connection = Connection()
db = connection[xmoduledb]
# Collection holding the edX course structure modules queried below.
mongo_modulestore = db['modulestore']
# course -> id.category : course
# section -> id.category : chapter
# subsection -> _id.category : sequential
# problem -> _id.category : vertical
def getCourseChapters(dict_course):
    """Return the chapter (section) tree of the course contained in
    *dict_course* (the module list from getDictCompleteCourseData).

    Each entry holds the chapter's module id, short name, and its
    sequentials (confusingly stored under the key 'chapters').

    Fix: removed the unused locals `_id` and `metadata` that were fetched
    on every iteration but never read; behavior is unchanged.
    """
    res_list = []
    for module in dict_course:
        if module.get('_id')['category'] == 'course':
            # The course module's children are the chapter module ids.
            for child in module.get('definition')['children']:
                name = child.split('/')[::-1][0]
                sequentials = getCourseSequentials(dict_course, name)
                res_list.append({'category': 'chapter', 'module_id': child,
                                 'name': name, 'chapters': sequentials})
    return res_list
#
# Get Sequentials : subsections
#
def getCourseSequentials(dict_course, cname):
    """Return one entry per sequential (subsection) of the chapter named
    *cname*, each with its module id, short name, and verticals."""
    subsections = []
    for module in dict_course:
        module_id = module.get('_id')
        if module_id['name'] == cname and module_id['category'] == 'chapter':
            for child in module.get('definition')['children']:
                child_name = child.split('/')[::-1][0]
                subsections.append({
                    'category': 'sequential',
                    'module_id': child,
                    'name': child_name,
                    'verticals': getCourseVerticals(dict_course, child_name),
                })
    return subsections
#
# Get Verticals : for group problems in subsection
#
def getCourseVerticals(dict_course, cname):
    """Return one entry per vertical (unit) of the sequential named *cname*,
    each with its module id, short name, graded items, and the
    sequential-wide total score.

    Bug fix: the item lookup and the append previously sat OUTSIDE the
    ``for k in childs`` loop, so only the last vertical of each sequential
    was returned (compare getCourseSequentials, which appends inside its
    loop). Every vertical is now reported.
    """
    res_list = []
    for v in dict_course:
        if v.get('_id')['name'] == cname and v.get('_id')['category'] == 'sequential':
            childs = v.get('definition')['children']
            if len(childs) > 0:
                # The total score belongs to the whole sequential, so it is the
                # same for every vertical beneath it — compute it once.
                total_score = getCourseVerticalsScore(dict_course, cname)
                for k in childs:
                    name = k.split('/')[::-1][0]
                    items = getCourseItems(dict_course, name)
                    res_list.append({'category': 'vertical', 'module_id': k,
                                     'name': name, 'items': items,
                                     'total_score': total_score})
    return res_list
#
# Get Items : last level
# filter: problems and iblopenbadges
#
def getCourseItems(dict_course, cname):
    """Return the graded items (problems and iblopenbadges) that are direct
    children of the vertical named *cname*.

    Problems get item_score from metadata 'weight' (defaulting to 1 when
    absent or 0); badge modules get badge_id from definition data 'bg_id'.
    NOTE(review): badge_id and item_score are NOT reset between items, so a
    problem listed after a badge inherits that badge's id (and a badge
    inherits the previous problem's score) — looks intentional for pairing
    badges with their section, but confirm against callers.
    """
    res_list = []
    badge_id = 0
    item_score = 0
    total_score = 0  # NOTE(review): never updated or read — dead variable.
    if len(dict_course) > 0:
        for i, v in enumerate(dict_course):
            if v.get('_id')['name'] == cname and v.get('_id')['category'] == 'vertical':
                childs = v.get('definition')['children']
                if len(childs) > 0:
                    for k in childs:
                        item_name = k.split('/')[::-1][0]
                        # Find the module record matching this child id.
                        for item, val in enumerate(dict_course):
                            if val.get('_id')['name'] == item_name and (val.get('_id')['category'] == 'problem' or val.get('_id')['category'] == 'iblopenbadges'):
                                category = val.get('_id')['category']
                                revision = val.get('_id')['revision']
                                metadata = val.get('metadata')
                                definition = val.get('definition')
                                # Draft revisions are skipped in both branches.
                                if category == 'iblopenbadges' and revision != 'draft':
                                    if 'bg_id' in definition['data']:
                                        badge_id = val.get('definition')['data']['bg_id']
                                    else:
                                        badge_id = 0
                                    res_list.append({'category': category, 'module_id': k, 'name': item_name, 'badge_id': badge_id, 'item_score': item_score})
                                else:
                                    if category == 'problem' and revision != 'draft':
                                        item_score = 0  # init
                                        if 'weight' in metadata:
                                            item_score = metadata['weight']
                                        # Unweighted problems count as 1 point.
                                        if item_score == 0: item_score = 1
                                        res_list.append({'category': category, 'module_id': k, 'name': item_name, 'badge_id': badge_id, 'item_score': item_score})
    return res_list
#
# Get Verticals Score : total subsections
#
def getCourseVerticalsScore(dict_course, cname):
    """Sum the item_score of every graded item under every vertical of the
    sequential named *cname*."""
    total_score = 0
    for module in dict_course:
        module_id = module.get('_id')
        if module_id['name'] == cname and module_id['category'] == 'sequential':
            for child in module.get('definition')['children']:
                child_name = child.split('/')[::-1][0]
                for item in getCourseItems(dict_course, child_name):
                    total_score += int(item['item_score'])
    return total_score
## -----------------------------
## ENGINE ----------------------
## -----------------------------
def getDictCompleteCourseData(conn, course_id):
    """Fetch all structural modules of a course from the modulestore.

    *conn* is a pymongo collection (e.g. mongo_modulestore); *course_id*
    is an 'org/course/run' string. Returns a list of UTF-8-converted
    module dicts projected down to children, badge ids, and weights;
    empty list when course_id is malformed or nothing matches.
    """
    course = setParseCourseId(course_id)
    dict_course = []
    if course != '':
        corg = course[0]
        ccourse = course[1]
        cname = course[2]
        # Pull every module category the tree-walk helpers understand,
        # projecting only the fields they read.
        res_query = conn.find({'_id.org': '' + corg + '', '_id.course': '' + ccourse + '', '_id.category': {"$in": ['course', 'chapter', 'sequential', 'vertical', 'problem', 'iblopenbadges']}}, {'definition.children': 1, 'definition.data.bg_id': 1, 'metadata.weight': 1})
        if res_query:
            for item in res_query:
                # Normalize unicode payloads to byte strings (Python 2).
                dict_course.append(convertUnicode2Utf8Dict(item))
    return dict_course
def getCompleteListProblems(conn, course_id):
    """Flatten the course tree into one record per problem/badge item.

    Each record carries the owning chapter and vertical module ids, the
    item's own id/category/badge id/score, and the vertical's total score
    (stored under the historical key 'chapter_max_score').
    """
    flattened = []
    course_tree = getDictCompleteCourseData(conn, course_id)
    if len(course_tree) > 0:
        for course_entry in getCourseChapters(course_tree):
            for chapter in course_entry['chapters']:
                chapter_id = chapter['module_id']
                for vertical in chapter['verticals']:
                    vertical_id = vertical['module_id']
                    vertical_score = vertical['total_score']
                    for item in vertical['items']:
                        flattened.append({
                            'chapter_module_id': chapter_id,
                            'vertical_module_id': vertical_id,
                            'item_module_id': item['module_id'],
                            'item_category': item['category'],
                            'item_badge_id': item['badge_id'],
                            'item_score': item['item_score'],
                            'chapter_max_score': vertical_score,
                        })
    return flattened
#
# Parse course_id
#
def setParseCourseId(course_id):
    """Parse an 'org/course/name' course id into its components.

    Returns the full '/'-split list (at least [org, course, name]) when the
    first three segments are all non-empty, and '' for any malformed input:
    empty string, the literal 'None', fewer than three segments, or a blank
    segment.

    Fixes over the previous version: always returns '' on failure (it used
    to fall through and return None for ''/'None' input, which broke
    callers testing ``result != ''`` before indexing), and guards against
    ids with fewer than three segments, which previously raised IndexError.
    """
    if course_id != '' and course_id != 'None':
        course = course_id.split('/')
        if (len(course) >= 3
                and course[0] != '' and course[1] != '' and course[2] != ''):
            return course
    return ''
#
# Get problems from a given badge_id
#
def getListProblemsFromBadgeId(conn, badge_id, course_id):
    """List the problems that live in the same chapter as *badge_id*.

    Returns [{'problem_id': ..., 'problem_score': ...}, ...] for every item
    sharing the badge's chapter, or [] when either argument is blank/'None'
    or the badge is not found.
    """
    results = []
    args_ok = (course_id != '' and course_id != 'None'
               and badge_id != '' and badge_id != 'None')
    if args_ok:
        records = getCompleteListProblems(conn, course_id)
        if len(records) > 0:
            # Find the chapter that owns the badge (last match wins, as in
            # the original implementation).
            owning_chapter = ''
            for rec in records:
                if rec['item_badge_id'] == badge_id:
                    owning_chapter = rec['chapter_module_id']
            if owning_chapter != '':
                results = [
                    {'problem_id': rec['item_module_id'],
                     'problem_score': rec['item_score']}
                    for rec in records
                    if rec['chapter_module_id'] == owning_chapter
                ]
    return results
#
# Get score from a given badge_id
#
def getScoreFromBadgeId(conn, badge_id, course_id):
    """Return the chapter max score associated with *badge_id*.

    Returns the 'chapter_max_score' value of the matching record (last
    match wins, preserving historical behaviour) or the string '0' when the
    arguments are blank/'None' or no record matches.  Note the mixed return
    type (int on a hit, str '0' on a miss) is kept for compatibility.

    Fix over the previous version: removed the unused ``problems_list``
    local and the redundant length guard.
    """
    score = '0'
    if course_id != '' and course_id != 'None' and badge_id != '' and badge_id != 'None':
        for k in getCompleteListProblems(conn, course_id):
            if k['item_badge_id'] == badge_id:
                score = k['chapter_max_score']
    return score
# -----------------------------------------------
# To remove when finished: manual tests
# -----------------------------------------------
"""
# Mongo DB Connect
from pymongo import Connection
xmoduledb = "edxapp"
connection = Connection()
db_mongo = connection[xmoduledb]
mongo_modulestore = db_mongo['modulestore']
bg_id='2008'
course_id='IBL/1/2015_2'
#badge_list_problems = getListProblemsFromBadgeId(mongo_modulestore,bg_id,course_id)
#print badge_list_problems
#badge_problems_score =getScoreFromBadgeId(mongo_modulestore,bg_id,course_id)
#print badge_problems_score
"""
| iblstudios/OpenBadgesXBlock-v1 | badges/edxappCourseData.py | Python | gpl-3.0 | 8,119 |
"""Support for MQTT message handling."""
from __future__ import annotations
from ast import literal_eval
import asyncio
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
import datetime as dt
from functools import lru_cache, partial, wraps
import inspect
from itertools import groupby
import logging
from operator import attrgetter
import ssl
import time
from typing import Any, Union, cast
import uuid
import attr
import certifi
import jinja2
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import websocket_api
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_CLIENT_ID,
CONF_DISCOVERY,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_PORT,
CONF_PROTOCOL,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
SERVICE_RELOAD,
Platform,
)
from homeassistant.core import (
CoreState,
Event,
HassJob,
HomeAssistant,
ServiceCall,
callback,
)
from homeassistant.data_entry_flow import BaseServiceInfo
from homeassistant.exceptions import HomeAssistantError, TemplateError, Unauthorized
from homeassistant.helpers import (
config_validation as cv,
device_registry as dr,
event,
template,
)
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.frame import report
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.logging import catch_log_exception
# Loading the config flow file will register the flow
from . import debug_info, discovery
from .const import (
ATTR_PAYLOAD,
ATTR_QOS,
ATTR_RETAIN,
ATTR_TOPIC,
CONF_BIRTH_MESSAGE,
CONF_BROKER,
CONF_COMMAND_TOPIC,
CONF_ENCODING,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
CONF_TOPIC,
CONF_WILL_MESSAGE,
DATA_MQTT_CONFIG,
DATA_MQTT_RELOAD_NEEDED,
DEFAULT_BIRTH,
DEFAULT_DISCOVERY,
DEFAULT_ENCODING,
DEFAULT_PREFIX,
DEFAULT_QOS,
DEFAULT_RETAIN,
DEFAULT_WILL,
DOMAIN,
MQTT_CONNECTED,
MQTT_DISCONNECTED,
PROTOCOL_311,
)
from .discovery import LAST_DISCOVERY
from .models import (
AsyncMessageCallbackType,
MessageCallbackType,
PublishMessage,
PublishPayloadType,
ReceiveMessage,
ReceivePayloadType,
)
from .util import _VALID_QOS_SCHEMA, valid_publish_topic, valid_subscribe_topic
_LOGGER = logging.getLogger(__name__)

# Sentinel for "no default supplied" — None is a legitimate default value,
# so identity against this object is used instead (see MqttValueTemplate).
_SENTINEL = object()

# Key under which the MQTT client instance is stored in hass.data.
DATA_MQTT = "mqtt"

SERVICE_PUBLISH = "publish"
SERVICE_DUMP = "dump"

CONF_DISCOVERY_PREFIX = "discovery_prefix"
CONF_KEEPALIVE = "keepalive"
CONF_CERTIFICATE = "certificate"
CONF_CLIENT_KEY = "client_key"
CONF_CLIENT_CERT = "client_cert"
CONF_TLS_INSECURE = "tls_insecure"
CONF_TLS_VERSION = "tls_version"

PROTOCOL_31 = "3.1"

DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_PROTOCOL = PROTOCOL_311
DEFAULT_TLS_PROTOCOL = "auto"

ATTR_TOPIC_TEMPLATE = "topic_template"
ATTR_PAYLOAD_TEMPLATE = "payload_template"

MAX_RECONNECT_WAIT = 300  # seconds

CONNECTION_SUCCESS = "connection_success"
CONNECTION_FAILED = "connection_failed"
CONNECTION_FAILED_RECOVERABLE = "connection_failed_recoverable"

# Seconds of discovery/subscribe inactivity required before the birth
# message is published (see MQTT._discovery_cooldown).
DISCOVERY_COOLDOWN = 2
# Seconds to wait for a broker ACK (PUBACK/SUBACK/UNSUBACK).
TIMEOUT_ACK = 10

# Entity platforms provided by this integration.
PLATFORMS = [
    Platform.ALARM_CONTROL_PANEL,
    Platform.BINARY_SENSOR,
    Platform.BUTTON,
    Platform.CAMERA,
    Platform.CLIMATE,
    Platform.COVER,
    Platform.FAN,
    Platform.HUMIDIFIER,
    Platform.LIGHT,
    Platform.LOCK,
    Platform.NUMBER,
    Platform.SELECT,
    Platform.SCENE,
    Platform.SENSOR,
    Platform.SIREN,
    Platform.SWITCH,
    Platform.VACUUM,
]

CLIENT_KEY_AUTH_MSG = (
    "client_key and client_cert must both be present in "
    "the MQTT broker configuration"
)

# Schema shared by the birth and will message options.
MQTT_WILL_BIRTH_SCHEMA = vol.Schema(
    {
        vol.Inclusive(ATTR_TOPIC, "topic_payload"): valid_publish_topic,
        vol.Inclusive(ATTR_PAYLOAD, "topic_payload"): cv.string,
        vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
        vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
    },
    required=True,
)

# Broker/connection configuration; also used to validate config entry data.
CONFIG_SCHEMA_BASE = vol.Schema(
    {
        vol.Optional(CONF_CLIENT_ID): cv.string,
        vol.Optional(CONF_KEEPALIVE, default=DEFAULT_KEEPALIVE): vol.All(
            vol.Coerce(int), vol.Range(min=15)
        ),
        vol.Optional(CONF_BROKER): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_USERNAME): cv.string,
        vol.Optional(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_CERTIFICATE): vol.Any("auto", cv.isfile),
        vol.Inclusive(
            CONF_CLIENT_KEY, "client_key_auth", msg=CLIENT_KEY_AUTH_MSG
        ): cv.isfile,
        vol.Inclusive(
            CONF_CLIENT_CERT, "client_key_auth", msg=CLIENT_KEY_AUTH_MSG
        ): cv.isfile,
        vol.Optional(CONF_TLS_INSECURE): cv.boolean,
        vol.Optional(CONF_TLS_VERSION, default=DEFAULT_TLS_PROTOCOL): vol.Any(
            "auto", "1.0", "1.1", "1.2"
        ),
        vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL): vol.All(
            cv.string, vol.In([PROTOCOL_31, PROTOCOL_311])
        ),
        vol.Optional(CONF_WILL_MESSAGE, default=DEFAULT_WILL): MQTT_WILL_BIRTH_SCHEMA,
        vol.Optional(CONF_BIRTH_MESSAGE, default=DEFAULT_BIRTH): MQTT_WILL_BIRTH_SCHEMA,
        vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
        # discovery_prefix must be a valid publish topic because if no
        # state topic is specified, it will be created with the given prefix.
        vol.Optional(
            CONF_DISCOVERY_PREFIX, default=DEFAULT_PREFIX
        ): valid_publish_topic,
    }
)

# configuration.yaml schema; most broker options are deprecated there in
# favour of the config entry (UI) flow.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            cv.deprecated(CONF_BIRTH_MESSAGE),  # Deprecated in HA Core 2022.3
            cv.deprecated(CONF_BROKER),  # Deprecated in HA Core 2022.3
            cv.deprecated(CONF_DISCOVERY),  # Deprecated in HA Core 2022.3
            cv.deprecated(CONF_PASSWORD),  # Deprecated in HA Core 2022.3
            cv.deprecated(CONF_PORT),  # Deprecated in HA Core 2022.3
            cv.deprecated(CONF_TLS_VERSION),  # Deprecated June 2020
            cv.deprecated(CONF_USERNAME),  # Deprecated in HA Core 2022.3
            cv.deprecated(CONF_WILL_MESSAGE),  # Deprecated in HA Core 2022.3
            CONFIG_SCHEMA_BASE,
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# Options common to every MQTT entity platform.
SCHEMA_BASE = {
    vol.Optional(CONF_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
    vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string,
}

MQTT_BASE_PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(SCHEMA_BASE)

# Sensor type platforms subscribe to MQTT events
MQTT_RO_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_STATE_TOPIC): valid_subscribe_topic,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
    }
)

# Switch type platforms publish to MQTT and may subscribe
MQTT_RW_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_COMMAND_TOPIC): valid_publish_topic,
        vol.Optional(CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
        vol.Optional(CONF_STATE_TOPIC): valid_subscribe_topic,
    }
)

# Service call validation schema
MQTT_PUBLISH_SCHEMA = vol.All(
    vol.Schema(
        {
            vol.Exclusive(ATTR_TOPIC, CONF_TOPIC): valid_publish_topic,
            vol.Exclusive(ATTR_TOPIC_TEMPLATE, CONF_TOPIC): cv.string,
            vol.Exclusive(ATTR_PAYLOAD, CONF_PAYLOAD): cv.string,
            vol.Exclusive(ATTR_PAYLOAD_TEMPLATE, CONF_PAYLOAD): cv.string,
            vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
            vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
        },
        required=True,
    ),
    cv.has_at_least_one_key(ATTR_TOPIC, ATTR_TOPIC_TEMPLATE),
)

SubscribePayloadType = Union[str, bytes]  # Only bytes if encoding is None
class MqttCommandTemplate:
    """Class for rendering MQTT payload with command templates."""

    def __init__(
        self,
        command_template: template.Template | None,
        *,
        hass: HomeAssistant | None = None,
        entity: Entity | None = None,
    ) -> None:
        """Instantiate a command template.

        command_template: template used to render outgoing payloads; when
        None, async_render passes values through unchanged.
        hass / entity: attach a HomeAssistant instance to the template.
        """
        self._attr_command_template = command_template
        if command_template is None:
            # NOTE: _entity is deliberately left unset in this case;
            # async_render returns early before touching it.
            return
        self._entity = entity
        command_template.hass = hass
        # An entity's hass takes precedence over the explicit hass argument.
        if entity:
            command_template.hass = entity.hass

    @callback
    def async_render(
        self,
        value: PublishPayloadType = None,
        variables: TemplateVarsType = None,
    ) -> PublishPayloadType:
        """Render or convert the command template with given value or variables."""

        def _convert_outgoing_payload(
            payload: PublishPayloadType,
        ) -> PublishPayloadType:
            """Ensure correct raw MQTT payload is passed as bytes for publishing."""
            if isinstance(payload, str):
                try:
                    # A template can render e.g. "b'\\x01'" to request a raw
                    # bytes payload; literal_eval safely reconstructs it.
                    native_object = literal_eval(payload)
                    if isinstance(native_object, bytes):
                        return native_object
                except (ValueError, TypeError, SyntaxError, MemoryError):
                    # Not a Python literal — keep the rendered string as-is.
                    pass
            return payload

        if self._attr_command_template is None:
            return value

        values = {"value": value}
        if self._entity:
            # Expose the owning entity's id/name as template variables.
            values[ATTR_ENTITY_ID] = self._entity.entity_id
            values[ATTR_NAME] = self._entity.name
        if variables is not None:
            # Caller-supplied variables override the defaults above.
            values.update(variables)
        return _convert_outgoing_payload(
            self._attr_command_template.async_render(values, parse_result=False)
        )
class MqttValueTemplate:
    """Class for rendering MQTT value template with possible json values."""

    def __init__(
        self,
        value_template: template.Template | None,
        *,
        hass: HomeAssistant | None = None,
        entity: Entity | None = None,
        config_attributes: TemplateVarsType = None,
    ) -> None:
        """Instantiate a value template.

        value_template: template applied to incoming payloads; when None,
        payloads are passed through unchanged.
        config_attributes: extra variables exposed to the template.
        """
        self._value_template = value_template
        self._config_attributes = config_attributes
        if value_template is None:
            # NOTE: _entity is deliberately left unset in this case;
            # rendering returns early before touching it.
            return
        value_template.hass = hass
        self._entity = entity
        # An entity's hass takes precedence over the explicit hass argument.
        if entity:
            value_template.hass = entity.hass

    @callback
    def async_render_with_possible_json_value(
        self,
        payload: ReceivePayloadType,
        default: ReceivePayloadType | object = _SENTINEL,
        variables: TemplateVarsType = None,
    ) -> ReceivePayloadType:
        """Render with possible json value or pass-though a received MQTT value."""
        if self._value_template is None:
            return payload

        values: dict[str, Any] = {}

        if variables is not None:
            values.update(variables)
        if self._config_attributes is not None:
            values.update(self._config_attributes)
        if self._entity:
            values[ATTR_ENTITY_ID] = self._entity.entity_id
            values[ATTR_NAME] = self._entity.name
        # _SENTINEL (identity-distinct object) means "no default supplied":
        # rendering errors then propagate instead of yielding the default.
        if default == _SENTINEL:
            return self._value_template.async_render_with_possible_json_value(
                payload, variables=values
            )
        return self._value_template.async_render_with_possible_json_value(
            payload, default, variables=values
        )
@dataclass
class MqttServiceInfo(BaseServiceInfo):
    """Prepared info from mqtt entries."""

    # Topic the message arrived on.
    topic: str
    # Raw or decoded payload, depending on the subscription encoding.
    payload: ReceivePayloadType
    # MQTT quality-of-service level of the received message.
    qos: int
    # Whether the broker flagged the message as retained.
    retain: bool
    # The (possibly wildcarded) topic filter that matched.
    subscribed_topic: str
    # UTC receive time stamped in MQTT._mqtt_handle_message.
    timestamp: dt.datetime

    def __getitem__(self, name: str) -> Any:
        """
        Allow property access by name for compatibility reason.

        Deprecated, and will be removed in version 2022.6.
        """
        # Log (once per offending integration) that dict-style access is
        # deprecated, then fall back to attribute access.
        report(
            f"accessed discovery_info['{name}'] instead of discovery_info.{name}; "
            "this will fail in version 2022.6",
            exclude_integrations={DOMAIN},
            error_if_core=False,
        )
        return getattr(self, name)
def publish(
    hass: HomeAssistant,
    topic: str,
    payload: PublishPayloadType,
    qos: int | None = 0,
    retain: bool | None = False,
    encoding: str | None = DEFAULT_ENCODING,
) -> None:
    """Publish message to a MQTT topic.

    Thread-safe wrapper: schedules async_publish on the event loop via
    hass.add_job, so it may be called from any thread.  Fire-and-forget —
    the result of the publish is not awaited.
    """
    hass.add_job(async_publish, hass, topic, payload, qos, retain, encoding)
async def async_publish(
    hass: HomeAssistant,
    topic: str,
    payload: PublishPayloadType,
    qos: int | None = 0,
    retain: bool | None = False,
    encoding: str | None = DEFAULT_ENCODING,
) -> None:
    """Publish message to a MQTT topic.

    Non-bytes payloads are stringified; when a non-default encoding is set
    the string is encoded to bytes with it.  A non-bytes payload with
    encoding=None, or an un-encodable payload, is logged and dropped
    (no exception is raised to the caller).
    """
    outgoing_payload = payload
    if not isinstance(payload, bytes):
        if not encoding:
            # Pass-through (encoding=None) is only valid for bytes payloads.
            _LOGGER.error(
                "Can't pass-through payload for publishing %s on %s with no encoding set, need 'bytes' got %s",
                payload,
                topic,
                type(payload),
            )
            return
        outgoing_payload = str(payload)
        if encoding != DEFAULT_ENCODING:
            # a string is encoded as utf-8 by default, other encoding requires bytes as payload
            try:
                outgoing_payload = outgoing_payload.encode(encoding)
            except (AttributeError, LookupError, UnicodeEncodeError):
                _LOGGER.error(
                    "Can't encode payload for publishing %s on %s with encoding %s",
                    payload,
                    topic,
                    encoding,
                )
                return

    await hass.data[DATA_MQTT].async_publish(topic, outgoing_payload, qos, retain)
# Deprecated subscription callback signatures taking (topic, payload, qos)
# instead of a single ReceiveMessage; adapted by wrap_msg_callback below.
AsyncDeprecatedMessageCallbackType = Callable[
    [str, ReceivePayloadType, int], Awaitable[None]
]
DeprecatedMessageCallbackType = Callable[[str, ReceivePayloadType, int], None]
def wrap_msg_callback(
    msg_callback: AsyncDeprecatedMessageCallbackType | DeprecatedMessageCallbackType,
) -> AsyncMessageCallbackType | MessageCallbackType:
    """Wrap an MQTT message callback to support deprecated signature.

    Adapts a legacy (topic, payload, qos) callback into the current
    single-ReceiveMessage signature, preserving sync/async flavour.
    """
    # Check for partials to properly determine if coroutine function
    check_func = msg_callback
    while isinstance(check_func, partial):
        check_func = check_func.func

    wrapper_func: AsyncMessageCallbackType | MessageCallbackType
    if asyncio.iscoroutinefunction(check_func):

        @wraps(msg_callback)
        async def async_wrapper(msg: ReceiveMessage) -> None:
            """Call with deprecated signature."""
            await cast(AsyncDeprecatedMessageCallbackType, msg_callback)(
                msg.topic, msg.payload, msg.qos
            )

        wrapper_func = async_wrapper
    else:

        @wraps(msg_callback)
        def wrapper(msg: ReceiveMessage) -> None:
            """Call with deprecated signature."""
            msg_callback(msg.topic, msg.payload, msg.qos)

        wrapper_func = wrapper
    return wrapper_func
@bind_hass
async def async_subscribe(
    hass: HomeAssistant,
    topic: str,
    msg_callback: AsyncMessageCallbackType
    | MessageCallbackType
    | DeprecatedMessageCallbackType
    | AsyncDeprecatedMessageCallbackType,
    qos: int = DEFAULT_QOS,
    encoding: str | None = "utf-8",
):
    """Subscribe to an MQTT topic.

    Call the return value to unsubscribe.
    """
    # Count callback parameters which don't have a default value
    non_default = 0
    if msg_callback:
        non_default = sum(
            p.default == inspect.Parameter.empty
            for _, p in inspect.signature(msg_callback).parameters.items()
        )

    wrapped_msg_callback = msg_callback
    # If we have 3 parameters with no default value, wrap the callback
    # (legacy (topic, payload, qos) signature — deprecated).
    if non_default == 3:
        module = inspect.getmodule(msg_callback)
        _LOGGER.warning(
            "Signature of MQTT msg_callback '%s.%s' is deprecated",
            module.__name__ if module else "<unknown>",
            msg_callback.__name__,
        )
        wrapped_msg_callback = wrap_msg_callback(
            cast(DeprecatedMessageCallbackType, msg_callback)
        )

    # catch_log_exception keeps a raising callback from killing the
    # MQTT receive path; the lambda builds the log message lazily.
    async_remove = await hass.data[DATA_MQTT].async_subscribe(
        topic,
        catch_log_exception(
            wrapped_msg_callback,
            lambda msg: (
                f"Exception in {msg_callback.__name__} when handling msg on "
                f"'{msg.topic}': '{msg.payload}'"
            ),
        ),
        qos,
        encoding,
    )
    return async_remove
@bind_hass
def subscribe(
    hass: HomeAssistant,
    topic: str,
    msg_callback: MessageCallbackType,
    qos: int = DEFAULT_QOS,
    encoding: str = "utf-8",
) -> Callable[[], None]:
    """Subscribe to an MQTT topic.

    Synchronous counterpart of async_subscribe for use from worker
    threads: blocks until the subscription is registered on the event
    loop and returns a thread-safe remove function.
    """
    async_remove = asyncio.run_coroutine_threadsafe(
        async_subscribe(hass, topic, msg_callback, qos, encoding), hass.loop
    ).result()

    def remove():
        """Remove listener convert."""
        # The unsubscribe callback must run on the event loop.
        run_callback_threadsafe(hass.loop, async_remove).result()

    return remove
async def _async_setup_discovery(
    hass: HomeAssistant, conf: ConfigType, config_entry
) -> None:
    """Try to start the discovery of MQTT devices.

    Subscribes to the configured discovery prefix via the discovery module.
    This method is a coroutine.
    """
    await discovery.async_start(hass, conf[CONF_DISCOVERY_PREFIX], config_entry)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Start the MQTT protocol service.

    Registers the websocket commands, stashes any configuration.yaml config
    for async_setup_entry to merge later, and triggers an integration
    discovery flow when no config entry exists yet.  The actual broker
    connection happens in async_setup_entry.
    """
    conf: ConfigType | None = config.get(DOMAIN)

    websocket_api.async_register_command(hass, websocket_subscribe)
    websocket_api.async_register_command(hass, websocket_remove_device)
    websocket_api.async_register_command(hass, websocket_mqtt_info)
    debug_info.initialize(hass)

    if conf:
        conf = dict(conf)
        hass.data[DATA_MQTT_CONFIG] = conf

    if not bool(hass.config_entries.async_entries(DOMAIN)):
        # No config entry yet — kick off the import/discovery flow so one
        # gets created.
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN,
                context={"source": config_entries.SOURCE_INTEGRATION_DISCOVERY},
                data={},
            )
        )

    return True
def _merge_config(entry, conf):
"""Merge configuration.yaml config with config entry."""
return {**conf, **entry.data}
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Load a config entry.

    Merges yaml config with the entry data (entry wins), connects the MQTT
    client, registers the 'publish' and 'dump' services, and starts MQTT
    discovery when enabled.
    """
    # If user didn't have configuration.yaml config, generate defaults
    if (conf := hass.data.get(DATA_MQTT_CONFIG)) is None:
        conf = CONFIG_SCHEMA_BASE(dict(entry.data))
    elif any(key in conf for key in entry.data):
        # Warn when entry data shadows yaml keys; password is masked.
        shared_keys = conf.keys() & entry.data.keys()
        override = {k: entry.data[k] for k in shared_keys}
        if CONF_PASSWORD in override:
            override[CONF_PASSWORD] = "********"
        _LOGGER.info(
            "Data in your configuration entry is going to override your "
            "configuration.yaml: %s",
            override,
        )

    conf = _merge_config(entry, conf)

    hass.data[DATA_MQTT] = MQTT(
        hass,
        entry,
        conf,
    )

    await hass.data[DATA_MQTT].async_connect()

    async def async_stop_mqtt(_event: Event):
        """Stop MQTT component."""
        await hass.data[DATA_MQTT].async_disconnect()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_mqtt)

    async def async_publish_service(call: ServiceCall) -> None:
        """Handle MQTT publish service calls."""
        msg_topic = call.data.get(ATTR_TOPIC)
        msg_topic_template = call.data.get(ATTR_TOPIC_TEMPLATE)
        payload = call.data.get(ATTR_PAYLOAD)
        payload_template = call.data.get(ATTR_PAYLOAD_TEMPLATE)
        qos: int = call.data[ATTR_QOS]
        retain: bool = call.data[ATTR_RETAIN]
        if msg_topic_template is not None:
            # Render and validate the topic template; any failure aborts
            # the publish with an error log instead of raising.
            try:
                rendered_topic = template.Template(
                    msg_topic_template, hass
                ).async_render(parse_result=False)
                msg_topic = valid_publish_topic(rendered_topic)
            except (jinja2.TemplateError, TemplateError) as exc:
                _LOGGER.error(
                    "Unable to publish: rendering topic template of %s "
                    "failed because %s",
                    msg_topic_template,
                    exc,
                )
                return
            except vol.Invalid as err:
                _LOGGER.error(
                    "Unable to publish: topic template '%s' produced an "
                    "invalid topic '%s' after rendering (%s)",
                    msg_topic_template,
                    rendered_topic,
                    err,
                )
                return

        if payload_template is not None:
            try:
                payload = MqttCommandTemplate(
                    template.Template(payload_template), hass=hass
                ).async_render()
            except (jinja2.TemplateError, TemplateError) as exc:
                _LOGGER.error(
                    "Unable to publish to %s: rendering payload template of "
                    "%s failed because %s",
                    msg_topic,
                    payload_template,
                    exc,
                )
                return

        await hass.data[DATA_MQTT].async_publish(msg_topic, payload, qos, retain)

    hass.services.async_register(
        DOMAIN, SERVICE_PUBLISH, async_publish_service, schema=MQTT_PUBLISH_SCHEMA
    )

    async def async_dump_service(call: ServiceCall) -> None:
        """Handle MQTT dump service calls."""
        messages = []

        @callback
        def collect_msg(msg):
            messages.append((msg.topic, msg.payload.replace("\n", "")))

        unsub = await async_subscribe(hass, call.data["topic"], collect_msg)

        def write_dump():
            with open(hass.config.path("mqtt_dump.txt"), "wt", encoding="utf8") as fp:
                for msg in messages:
                    fp.write(",".join(msg) + "\n")

        async def finish_dump(_):
            """Write dump to file."""
            unsub()
            # File I/O runs in the executor to keep the loop responsive.
            await hass.async_add_executor_job(write_dump)

        event.async_call_later(hass, call.data["duration"], finish_dump)

    hass.services.async_register(
        DOMAIN,
        SERVICE_DUMP,
        async_dump_service,
        schema=vol.Schema(
            {
                vol.Required("topic"): valid_subscribe_topic,
                vol.Optional("duration", default=5): int,
            }
        ),
    )

    if conf.get(CONF_DISCOVERY):
        await _async_setup_discovery(hass, conf, entry)

    # A reload was requested while the entry was not loaded — honour it now.
    if DATA_MQTT_RELOAD_NEEDED in hass.data:
        hass.data.pop(DATA_MQTT_RELOAD_NEEDED)
        await hass.services.async_call(
            DOMAIN,
            SERVICE_RELOAD,
            {},
            blocking=False,
        )

    return True
@attr.s(slots=True, frozen=True)
class Subscription:
    """Class to hold data about an active subscription."""

    topic: str = attr.ib()  # topic filter (may contain wildcards)
    matcher: Any = attr.ib()  # callable produced by _matcher_for_topic
    job: HassJob = attr.ib()  # wrapped message callback
    qos: int = attr.ib(default=0)  # requested QoS level
    encoding: str | None = attr.ib(default="utf-8")  # None = raw bytes
class MQTT:
"""Home Assistant MQTT client."""
def __init__(
self,
hass: HomeAssistant,
config_entry,
conf,
) -> None:
"""Initialize Home Assistant MQTT client."""
# We don't import on the top because some integrations
# should be able to optionally rely on MQTT.
import paho.mqtt.client as mqtt # pylint: disable=import-outside-toplevel
self.hass = hass
self.config_entry = config_entry
self.conf = conf
self.subscriptions: list[Subscription] = []
self.connected = False
self._ha_started = asyncio.Event()
self._last_subscribe = time.time()
self._mqttc: mqtt.Client = None
self._paho_lock = asyncio.Lock()
self._pending_operations: dict[str, asyncio.Event] = {}
if self.hass.state == CoreState.running:
self._ha_started.set()
else:
@callback
def ha_started(_):
self._ha_started.set()
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, ha_started)
self.init_client()
self.config_entry.add_update_listener(self.async_config_entry_updated)
@staticmethod
async def async_config_entry_updated(
hass: HomeAssistant, entry: ConfigEntry
) -> None:
"""Handle signals of config entry being updated.
This is a static method because a class method (bound method), can not be used with weak references.
Causes for this is config entry options changing.
"""
self = hass.data[DATA_MQTT]
if (conf := hass.data.get(DATA_MQTT_CONFIG)) is None:
conf = CONFIG_SCHEMA_BASE(dict(entry.data))
self.conf = _merge_config(entry, conf)
await self.async_disconnect()
self.init_client()
await self.async_connect()
await discovery.async_stop(hass)
if self.conf.get(CONF_DISCOVERY):
await _async_setup_discovery(hass, self.conf, entry)
def init_client(self):
"""Initialize paho client."""
# We don't import on the top because some integrations
# should be able to optionally rely on MQTT.
import paho.mqtt.client as mqtt # pylint: disable=import-outside-toplevel
if self.conf[CONF_PROTOCOL] == PROTOCOL_31:
proto: int = mqtt.MQTTv31
else:
proto = mqtt.MQTTv311
if (client_id := self.conf.get(CONF_CLIENT_ID)) is None:
# PAHO MQTT relies on the MQTT server to generate random client IDs.
# However, that feature is not mandatory so we generate our own.
client_id = mqtt.base62(uuid.uuid4().int, padding=22)
self._mqttc = mqtt.Client(client_id, protocol=proto)
# Enable logging
self._mqttc.enable_logger()
username = self.conf.get(CONF_USERNAME)
password = self.conf.get(CONF_PASSWORD)
if username is not None:
self._mqttc.username_pw_set(username, password)
if (certificate := self.conf.get(CONF_CERTIFICATE)) == "auto":
certificate = certifi.where()
client_key = self.conf.get(CONF_CLIENT_KEY)
client_cert = self.conf.get(CONF_CLIENT_CERT)
tls_insecure = self.conf.get(CONF_TLS_INSECURE)
if certificate is not None:
self._mqttc.tls_set(
certificate,
certfile=client_cert,
keyfile=client_key,
tls_version=ssl.PROTOCOL_TLS,
)
if tls_insecure is not None:
self._mqttc.tls_insecure_set(tls_insecure)
self._mqttc.on_connect = self._mqtt_on_connect
self._mqttc.on_disconnect = self._mqtt_on_disconnect
self._mqttc.on_message = self._mqtt_on_message
self._mqttc.on_publish = self._mqtt_on_callback
self._mqttc.on_subscribe = self._mqtt_on_callback
self._mqttc.on_unsubscribe = self._mqtt_on_callback
if (
CONF_WILL_MESSAGE in self.conf
and ATTR_TOPIC in self.conf[CONF_WILL_MESSAGE]
):
will_message = PublishMessage(**self.conf[CONF_WILL_MESSAGE])
else:
will_message = None
if will_message is not None:
self._mqttc.will_set(
topic=will_message.topic,
payload=will_message.payload,
qos=will_message.qos,
retain=will_message.retain,
)
async def async_publish(
self, topic: str, payload: PublishPayloadType, qos: int, retain: bool
) -> None:
"""Publish a MQTT message."""
async with self._paho_lock:
msg_info = await self.hass.async_add_executor_job(
self._mqttc.publish, topic, payload, qos, retain
)
_LOGGER.debug(
"Transmitting message on %s: '%s', mid: %s",
topic,
payload,
msg_info.mid,
)
_raise_on_error(msg_info.rc)
await self._wait_for_mid(msg_info.mid)
async def async_connect(self) -> None:
"""Connect to the host. Does not process messages yet."""
# pylint: disable-next=import-outside-toplevel
import paho.mqtt.client as mqtt
result: int | None = None
try:
result = await self.hass.async_add_executor_job(
self._mqttc.connect,
self.conf[CONF_BROKER],
self.conf[CONF_PORT],
self.conf[CONF_KEEPALIVE],
)
except OSError as err:
_LOGGER.error("Failed to connect to MQTT server due to exception: %s", err)
if result is not None and result != 0:
_LOGGER.error(
"Failed to connect to MQTT server: %s", mqtt.error_string(result)
)
self._mqttc.loop_start()
async def async_disconnect(self):
"""Stop the MQTT client."""
def stop():
"""Stop the MQTT client."""
# Do not disconnect, we want the broker to always publish will
self._mqttc.loop_stop()
await self.hass.async_add_executor_job(stop)
async def async_subscribe(
self,
topic: str,
msg_callback: MessageCallbackType,
qos: int,
encoding: str | None = None,
) -> Callable[[], None]:
"""Set up a subscription to a topic with the provided qos.
This method is a coroutine.
"""
if not isinstance(topic, str):
raise HomeAssistantError("Topic needs to be a string!")
subscription = Subscription(
topic, _matcher_for_topic(topic), HassJob(msg_callback), qos, encoding
)
self.subscriptions.append(subscription)
self._matching_subscriptions.cache_clear()
# Only subscribe if currently connected.
if self.connected:
self._last_subscribe = time.time()
await self._async_perform_subscription(topic, qos)
@callback
def async_remove() -> None:
"""Remove subscription."""
if subscription not in self.subscriptions:
raise HomeAssistantError("Can't remove subscription twice")
self.subscriptions.remove(subscription)
self._matching_subscriptions.cache_clear()
if any(other.topic == topic for other in self.subscriptions):
# Other subscriptions on topic remaining - don't unsubscribe.
return
# Only unsubscribe if currently connected.
if self.connected:
self.hass.async_create_task(self._async_unsubscribe(topic))
return async_remove
async def _async_unsubscribe(self, topic: str) -> None:
"""Unsubscribe from a topic.
This method is a coroutine.
"""
async with self._paho_lock:
result: int | None = None
result, mid = await self.hass.async_add_executor_job(
self._mqttc.unsubscribe, topic
)
_LOGGER.debug("Unsubscribing from %s, mid: %s", topic, mid)
_raise_on_error(result)
await self._wait_for_mid(mid)
async def _async_perform_subscription(self, topic: str, qos: int) -> None:
"""Perform a paho-mqtt subscription."""
async with self._paho_lock:
result: int | None = None
result, mid = await self.hass.async_add_executor_job(
self._mqttc.subscribe, topic, qos
)
_LOGGER.debug("Subscribing to %s, mid: %s", topic, mid)
_raise_on_error(result)
await self._wait_for_mid(mid)
def _mqtt_on_connect(self, _mqttc, _userdata, _flags, result_code: int) -> None:
"""On connect callback.
Resubscribe to all topics we were subscribed to and publish birth
message.
"""
# pylint: disable-next=import-outside-toplevel
import paho.mqtt.client as mqtt
if result_code != mqtt.CONNACK_ACCEPTED:
_LOGGER.error(
"Unable to connect to the MQTT broker: %s",
mqtt.connack_string(result_code),
)
return
self.connected = True
dispatcher_send(self.hass, MQTT_CONNECTED)
_LOGGER.info(
"Connected to MQTT server %s:%s (%s)",
self.conf[CONF_BROKER],
self.conf[CONF_PORT],
result_code,
)
# Group subscriptions to only re-subscribe once for each topic.
keyfunc = attrgetter("topic")
for topic, subs in groupby(sorted(self.subscriptions, key=keyfunc), keyfunc):
# Re-subscribe with the highest requested qos
max_qos = max(subscription.qos for subscription in subs)
self.hass.add_job(self._async_perform_subscription, topic, max_qos)
if (
CONF_BIRTH_MESSAGE in self.conf
and ATTR_TOPIC in self.conf[CONF_BIRTH_MESSAGE]
):
async def publish_birth_message(birth_message):
await self._ha_started.wait() # Wait for Home Assistant to start
await self._discovery_cooldown() # Wait for MQTT discovery to cool down
await self.async_publish(
topic=birth_message.topic,
payload=birth_message.payload,
qos=birth_message.qos,
retain=birth_message.retain,
)
birth_message = PublishMessage(**self.conf[CONF_BIRTH_MESSAGE])
asyncio.run_coroutine_threadsafe(
publish_birth_message(birth_message), self.hass.loop
)
def _mqtt_on_message(self, _mqttc, _userdata, msg) -> None:
"""Message received callback."""
self.hass.add_job(self._mqtt_handle_message, msg)
@lru_cache(2048)
def _matching_subscriptions(self, topic):
subscriptions = []
for subscription in self.subscriptions:
if subscription.matcher(topic):
subscriptions.append(subscription)
return subscriptions
@callback
def _mqtt_handle_message(self, msg) -> None:
_LOGGER.debug(
"Received message on %s%s: %s",
msg.topic,
" (retained)" if msg.retain else "",
msg.payload[0:8192],
)
timestamp = dt_util.utcnow()
subscriptions = self._matching_subscriptions(msg.topic)
for subscription in subscriptions:
payload: SubscribePayloadType = msg.payload
if subscription.encoding is not None:
try:
payload = msg.payload.decode(subscription.encoding)
except (AttributeError, UnicodeDecodeError):
_LOGGER.warning(
"Can't decode payload %s on %s with encoding %s (for %s)",
msg.payload[0:8192],
msg.topic,
subscription.encoding,
subscription.job,
)
continue
self.hass.async_run_hass_job(
subscription.job,
ReceiveMessage(
msg.topic,
payload,
msg.qos,
msg.retain,
subscription.topic,
timestamp,
),
)
def _mqtt_on_callback(self, _mqttc, _userdata, mid, _granted_qos=None) -> None:
"""Publish / Subscribe / Unsubscribe callback."""
self.hass.add_job(self._mqtt_handle_mid, mid)
@callback
def _mqtt_handle_mid(self, mid) -> None:
# Create the mid event if not created, either _mqtt_handle_mid or _wait_for_mid
# may be executed first.
if mid not in self._pending_operations:
self._pending_operations[mid] = asyncio.Event()
self._pending_operations[mid].set()
def _mqtt_on_disconnect(self, _mqttc, _userdata, result_code: int) -> None:
"""Disconnected callback."""
self.connected = False
dispatcher_send(self.hass, MQTT_DISCONNECTED)
_LOGGER.warning(
"Disconnected from MQTT server %s:%s (%s)",
self.conf[CONF_BROKER],
self.conf[CONF_PORT],
result_code,
)
async def _wait_for_mid(self, mid):
    """Wait for ACK from broker."""
    # Create the mid event if not created, either _mqtt_handle_mid or
    # _wait_for_mid may be executed first.
    if mid not in self._pending_operations:
        self._pending_operations[mid] = asyncio.Event()
    try:
        await asyncio.wait_for(self._pending_operations[mid].wait(), TIMEOUT_ACK)
    except asyncio.TimeoutError:
        # Best effort: log and carry on rather than failing the caller.
        _LOGGER.warning(
            "No ACK from MQTT server in %s seconds (mid: %s)", TIMEOUT_ACK, mid
        )
    finally:
        # Always drop the entry so _pending_operations cannot grow unbounded.
        del self._pending_operations[mid]
async def _discovery_cooldown(self):
    """Sleep until both the discovery and subscribe cooldowns have expired."""
    now = time.time()
    # Reset discovery and subscribe cooldowns
    self.hass.data[LAST_DISCOVERY] = now
    self._last_subscribe = now
    last_discovery = self.hass.data[LAST_DISCOVERY]
    last_subscribe = self._last_subscribe
    wait_until = max(
        last_discovery + DISCOVERY_COOLDOWN, last_subscribe + DISCOVERY_COOLDOWN
    )
    # Other tasks can bump the timestamps while we sleep, so re-read them
    # and recompute the deadline after every wait until it finally passes.
    while now < wait_until:
        await asyncio.sleep(wait_until - now)
        now = time.time()
        last_discovery = self.hass.data[LAST_DISCOVERY]
        last_subscribe = self._last_subscribe
        wait_until = max(
            last_discovery + DISCOVERY_COOLDOWN, last_subscribe + DISCOVERY_COOLDOWN
        )
def _raise_on_error(result_code: int | None) -> None:
    """Raise HomeAssistantError when *result_code* indicates a failure."""
    # pylint: disable-next=import-outside-toplevel
    import paho.mqtt.client as mqtt

    # Paho reports 0 for success; None means no code was reported at all.
    if result_code is None or result_code == 0:
        return
    raise HomeAssistantError(
        f"Error talking to MQTT: {mqtt.error_string(result_code)}"
    )
def _matcher_for_topic(subscription: str) -> Any:
    """Build a callable that tests topics against an MQTT topic filter.

    The returned callable evaluates to a truthy value when the topic
    matches *subscription*, and False otherwise.
    """
    # pylint: disable-next=import-outside-toplevel
    from paho.mqtt.matcher import MQTTMatcher

    matcher = MQTTMatcher()
    matcher[subscription] = True

    def _match(topic):
        return next(matcher.iter_match(topic), False)

    return _match
@websocket_api.websocket_command(
    {vol.Required("type"): "mqtt/device/debug_info", vol.Required("device_id"): str}
)
@callback
def websocket_mqtt_info(hass, connection, msg):
    """Get MQTT debug info for device."""
    device_id = msg["device_id"]
    mqtt_info = debug_info.info_for_device(hass, device_id)
    # Reply on the same websocket message id the request came in with.
    connection.send_result(msg["id"], mqtt_info)
@websocket_api.websocket_command(
    {vol.Required("type"): "mqtt/device/remove", vol.Required("device_id"): str}
)
@websocket_api.async_response
async def websocket_remove_device(hass, connection, msg):
    """Delete device."""
    device_id = msg["device_id"]
    device_registry = dr.async_get(hass)

    device = device_registry.async_get(device_id)
    if device is None:
        connection.send_error(
            msg["id"], websocket_api.const.ERR_NOT_FOUND, "Device not found"
        )
        return

    for entry_id in device.config_entries:
        entry = hass.config_entries.async_get_entry(entry_id)
        # Only delete the device if it belongs to an MQTT device entry
        if entry.domain != DOMAIN:
            continue
        await async_remove_config_entry_device(hass, entry, device)
        device_registry.async_update_device(
            device_id, remove_config_entry_id=entry.entry_id
        )
        connection.send_message(websocket_api.result_message(msg["id"]))
        return

    connection.send_error(
        msg["id"], websocket_api.const.ERR_NOT_FOUND, "Non MQTT device"
    )
@websocket_api.websocket_command(
    {
        vol.Required("type"): "mqtt/subscribe",
        vol.Required("topic"): valid_subscribe_topic,
    }
)
@websocket_api.async_response
async def websocket_subscribe(hass, connection, msg):
    """Subscribe to a MQTT topic."""
    # Raw topic access is privileged: admins only.
    if not connection.user.is_admin:
        raise Unauthorized

    async def forward_messages(mqttmsg: ReceiveMessage):
        """Forward events to websocket."""
        connection.send_message(
            websocket_api.event_message(
                msg["id"],
                {
                    "topic": mqttmsg.topic,
                    "payload": mqttmsg.payload,
                    "qos": mqttmsg.qos,
                    "retain": mqttmsg.retain,
                },
            )
        )

    # Keep the unsubscribe callable so the connection cleans up the MQTT
    # subscription when the websocket closes or the client unsubscribes.
    connection.subscriptions[msg["id"]] = await async_subscribe(
        hass, msg["topic"], forward_messages
    )
    connection.send_message(websocket_api.result_message(msg["id"]))
ConnectionStatusCallback = Callable[[bool], None]
@callback
def async_subscribe_connection_status(
    hass: HomeAssistant, connection_status_callback: ConnectionStatusCallback
) -> Callable[[], None]:
    """Subscribe to MQTT connection changes.

    Returns a callable that removes both dispatcher subscriptions.
    """
    status_job = HassJob(connection_status_callback)

    def _make_handler(is_connected):
        # Both signals run the same job, with True/False baked in.
        async def _handler():
            task = hass.async_run_hass_job(status_job, is_connected)
            if task:
                await task

        return _handler

    unsubs = [
        async_dispatcher_connect(hass, MQTT_CONNECTED, _make_handler(True)),
        async_dispatcher_connect(hass, MQTT_DISCONNECTED, _make_handler(False)),
    ]

    @callback
    def unsubscribe():
        for unsub in unsubs:
            unsub()

    return unsubscribe
def is_connected(hass: HomeAssistant) -> bool:
    """Return if MQTT client is connected."""
    # The MQTT client instance lives in hass.data under DATA_MQTT.
    return hass.data[DATA_MQTT].connected
async def async_remove_config_entry_device(
    hass: HomeAssistant, config_entry: ConfigEntry, device_entry: DeviceEntry
) -> bool:
    """Remove MQTT config entry from a device."""
    # pylint: disable-next=import-outside-toplevel
    from . import device_automation

    # Clean up any MQTT device automations attached to this device, then
    # report success unconditionally so the registry link is removed.
    await device_automation.async_removed_from_device(hass, device_entry.id)
    return True
| rohitranjan1991/home-assistant | homeassistant/components/mqtt/__init__.py | Python | mit | 42,892 |
"""The harness interface
The interface between the client and the server when hosted.
"""
__author__ = """Copyright Andy Whitcroft 2006"""
import os, sys, logging
import common
class harness(object):
    """The NULL server harness

    Properties:
            job
                    The job object for this job
    """

    def __init__(self, job):
        """Create the harness and bind it to *job*."""
        self.setup(job)

    def setup(self, job):
        """Remember *job* and register any default kernel config sets."""
        self.job = job

        config_dir = os.path.join(os.environ['AUTODIR'], 'configs')
        if not os.path.isdir(config_dir):
            return
        # Only the top level of the configs directory is consulted.
        (name, dirs, files) = os.walk(config_dir).next()
        job.config_set('kernel.default_config_set',
                       [config_dir + '/'] + files)

    def run_start(self):
        """A run within this job is starting"""
        pass

    def run_pause(self):
        """A run within this job is completing (expect continue)"""
        pass

    def run_reboot(self):
        """A run within this job is performing a reboot
        (expect continue following reboot)
        """
        pass

    def run_abort(self):
        """A run within this job is aborting. It all went wrong"""
        pass

    def run_complete(self):
        """A run within this job is completing (all done)"""
        pass

    def run_test_complete(self):
        """A test run by this job is complete. Note that if multiple
        tests are run in parallel, this will only be called when all
        of the parallel runs complete."""
        pass

    def test_status(self, status, tag):
        """A test within this job is completing"""
        pass

    def test_status_detail(self, code, subdir, operation, status, tag,
                           optional_fields):
        """A test within this job is completing (detail)"""
        pass
def select(which, job, harness_args):
    """Import and instantiate the harness implementation named *which*.

    Falls back to the 'standalone' harness when *which* is empty/None.
    """
    which = which or 'standalone'
    logging.debug('Selected harness: %s' % which)

    harness_name = 'harness_%s' % which
    module = common.setup_modules.import_module(harness_name,
                                                'autotest_lib.client.bin')
    return getattr(module, harness_name)(job, harness_args)
| clebergnu/autotest | client/bin/harness.py | Python | gpl-2.0 | 2,418 |
#!/usr/bin/env python
import fnmatch
import os
import sys
import re
import math
import platform
import xml.etree.ElementTree as ET
################################################################################
# Config #
################################################################################
# Default state of every single-letter command line flag.
#
# BUG FIX: the original tested `platform.platform() != 'Windows'`, which is
# always True — platform.platform() returns strings like 'Windows-10-...',
# never the bare 'Windows' — so colors were silently left on under Windows.
# platform.system() returns exactly 'Windows' there, matching the intent:
# disable colors by default on Windows, since we use ANSI escape codes.
flags = {
    'c': platform.system() != 'Windows',  # colors
    'b': False,  # only not fully described classes
    'g': False,  # only completed classes
    's': False,  # status comments
    'u': False,  # URLs to docs
    'h': False,  # help and exit
    'p': False,  # show percentages
    'o': True,   # overall column
    'i': False,  # collapse item columns
    'a': True,   # show all items
    'e': False,  # hide empty items
}
# Human-readable help text for each single-letter flag (shown by --help).
flag_descriptions = {
    'c': 'Toggle colors when outputting.',
    'b': 'Toggle showing only not fully described classes.',
    'g': 'Toggle showing only completed classes.',
    's': 'Toggle showing comments about the status.',
    'u': 'Toggle URLs to docs.',
    'h': 'Show help and exit.',
    'p': 'Toggle showing percentage as well as counts.',
    'o': 'Toggle overall column.',
    'i': 'Toggle collapse of class items columns.',
    'a': 'Toggle showing all items.',
    'e': 'Toggle hiding empty items.',
}
# Maps long command line options to their single-letter equivalents.
long_flags = {
    'colors': 'c',
    'use-colors': 'c',
    'bad': 'b',
    'only-bad': 'b',
    'good': 'g',
    'only-good': 'g',
    'comments': 's',
    'status': 's',
    'urls': 'u',
    'gen-url': 'u',
    'help': 'h',
    'percent': 'p',
    'use-percentages': 'p',
    'overall': 'o',
    'use-overall': 'o',
    'items': 'i',
    'collapse': 'i',
    'all': 'a',
    'empty': 'e',
}
# Internal column keys and their display headers (kept in lockstep).
table_columns = ['name', 'brief_description', 'description', 'methods', 'constants', 'members', 'signals']
table_column_names = ['Name', 'Brief Desc.', 'Desc.', 'Methods', 'Constants', 'Members', 'Signals']
# ANSI SGR code sequences per semantic colour role.
colors = {
    'name': [36],  # cyan
    'part_big_problem': [4, 31],  # underline, red
    'part_problem': [31],  # red
    'part_mostly_good': [33],  # yellow
    'part_good': [32],  # green
    'url': [4, 34],  # underline, blue
    'section': [1, 4],  # bold, underline
    'state_off': [36],  # cyan
    'state_on': [1, 35],  # bold, magenta/plum
}
# Weight of the brief/full descriptions in the overall percentage.
overall_progress_description_weigth = 10
################################################################################
# Utils #
################################################################################
def validate_tag(elem, tag):
    """Exit with status 255 unless *elem* carries the expected *tag*."""
    if elem.tag == tag:
        return
    print('Tag mismatch, expected "' + tag + '", got ' + elem.tag)
    sys.exit(255)
def color(color, string):
    """Wrap *string* in the ANSI codes configured for role *color*.

    Returns the string unchanged when colouring is disabled or the
    terminal cannot display it.
    """
    if not (flags['c'] and terminal_supports_color()):
        return string
    prefix = ''.join('\033[' + str(code) + 'm' for code in colors[color])
    return prefix + string + '\033[0m'
# Matches a single ANSI escape sequence (ESC ... 'm').
ansi_escape = re.compile(r'\x1b[^m]*m')


def nonescape_len(s):
    """Return the visible length of *s*, ignoring ANSI escape sequences."""
    stripped = ansi_escape.sub('', s)
    return len(stripped)
def terminal_supports_color():
    """Best-effort check that stdout is a colour-capable terminal."""
    plat = sys.platform
    if plat == 'Pocket PC':
        return False
    # Plain win32 consoles need ANSICON to understand escape codes.
    if plat == 'win32' and 'ANSICON' not in os.environ:
        return False
    return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
################################################################################
# Classes #
################################################################################
class ClassStatusProgress:
    """Counter of described vs. total items with coloured formatting."""

    def __init__(self, described=0, total=0):
        self.described = described
        self.total = total

    def __add__(self, other):
        return ClassStatusProgress(
            self.described + other.described,
            self.total + other.total)

    def increment(self, described):
        """Count one more item; *described* says whether it has docs."""
        if described:
            self.described += 1
        self.total += 1

    def is_ok(self):
        return self.described >= self.total

    def to_configured_colored_string(self):
        """Format according to the global percentage flag."""
        if not flags['p']:
            return self.to_colored_string()
        return self.to_colored_string(
            '{percent}% ({has}/{total})',
            '{pad_percent}{pad_described}{s}{pad_total}')

    def to_colored_string(self, format='{has}/{total}', pad_format='{pad_described}{s}{pad_total}'):
        """Render the counters, coloured by how complete they are."""
        ratio = float(self.described) / float(self.total) if self.total != 0 else 1
        percent = int(round(100 * ratio))
        s = format.format(has=str(self.described), total=str(self.total),
                          percent=str(percent))
        # Pick the colour band: complete, >= 3/4, some, none.
        if self.is_ok():
            colour_key = 'part_good'
        elif self.described >= self.total / 4 * 3:
            colour_key = 'part_mostly_good'
        elif self.described > 0:
            colour_key = 'part_problem'
        else:
            colour_key = 'part_big_problem'
        s = color(colour_key, s)
        # Pad so columns of counters line up regardless of digit count.
        width = max(len(str(self.described)), len(str(self.total)))
        pad_described = ' ' * (width - len(str(self.described)))
        pad_percent = ' ' * (3 - len(str(percent)))
        pad_total = ' ' * (width - len(str(self.total)))
        return pad_format.format(pad_described=pad_described,
                                 pad_total=pad_total,
                                 pad_percent=pad_percent,
                                 s=s)
class ClassStatus:
    """Documentation status of one class (or an aggregated 'Total' row)."""

    def __init__(self, name=''):
        self.name = name
        # Descriptions default to "present"; generate_for_class overwrites.
        self.has_brief_description = True
        self.has_description = True
        self.progresses = {
            'methods': ClassStatusProgress(),
            'constants': ClassStatusProgress(),
            'members': ClassStatusProgress(),
            'signals': ClassStatusProgress()
        }

    def __add__(self, other):
        # Merge two statuses; used to accumulate the grand total row.
        new_status = ClassStatus()
        new_status.name = self.name
        new_status.has_brief_description = self.has_brief_description and other.has_brief_description
        new_status.has_description = self.has_description and other.has_description
        for k in self.progresses:
            new_status.progresses[k] = self.progresses[k] + other.progresses[k]
        return new_status

    def is_ok(self):
        # Fully documented: both descriptions present and every category complete.
        ok = True
        ok = ok and self.has_brief_description
        ok = ok and self.has_description
        for k in self.progresses:
            ok = ok and self.progresses[k].is_ok()
        return ok

    def is_empty(self):
        # True when no incomplete category contains any items at all.
        sum = 0
        for k in self.progresses:
            if self.progresses[k].is_ok():
                continue
            sum += self.progresses[k].total
        return sum < 1

    def make_output(self):
        """Build the dict of (possibly coloured) cell strings for the table."""
        output = {}
        output['name'] = color('name', self.name)
        ok_string = color('part_good', 'OK')
        missing_string = color('part_big_problem', 'MISSING')
        output['brief_description'] = ok_string if self.has_brief_description else missing_string
        output['description'] = ok_string if self.has_description else missing_string
        # Descriptions are weighted so they influence the overall percentage
        # more than a single undocumented item would.
        description_progress = ClassStatusProgress(
            (self.has_brief_description + self.has_description) * overall_progress_description_weigth,
            2 * overall_progress_description_weigth
        )
        items_progress = ClassStatusProgress()
        for k in ['methods', 'constants', 'members', 'signals']:
            items_progress += self.progresses[k]
            output[k] = self.progresses[k].to_configured_colored_string()
        output['items'] = items_progress.to_configured_colored_string()
        output['overall'] = (description_progress + items_progress).to_colored_string('{percent}%', '{pad_percent}{s}')
        if self.name.startswith('Total'):
            # The aggregate row links to the class index rather than a page.
            output['url'] = color('url', 'http://docs.godotengine.org/en/latest/classes/')
            if flags['s']:
                output['comment'] = color('part_good', 'ALL OK')
        else:
            output['url'] = color('url', 'http://docs.godotengine.org/en/latest/classes/class_{name}.html'.format(name=self.name.lower()))
            if flags['s'] and not flags['g'] and self.is_ok():
                output['comment'] = color('part_good', 'ALL OK')
        return output

    @staticmethod
    def generate_for_class(c):
        """Derive a ClassStatus from a parsed <class> XML element."""
        status = ClassStatus()
        status.name = c.attrib['name']
        # setgets do not count
        # NOTE(review): Element.find() returns Element objects, which compare
        # by identity, so the remove() calls below most likely always raise
        # and get swallowed by the bare except — verify against the doc XML
        # schema whether setter/getter exclusion actually works as intended.
        methods = []
        for tag in list(c):
            if tag.tag in ['methods']:
                for sub_tag in list(tag):
                    methods.append(sub_tag.find('name'))
            if tag.tag in ['members']:
                for sub_tag in list(tag):
                    try:
                        methods.remove(sub_tag.find('setter'))
                        methods.remove(sub_tag.find('getter'))
                    except:
                        pass
        for tag in list(c):
            if tag.tag == 'brief_description':
                status.has_brief_description = len(tag.text.strip()) > 0
            elif tag.tag == 'description':
                status.has_description = len(tag.text.strip()) > 0
            elif tag.tag in ['methods', 'signals']:
                for sub_tag in list(tag):
                    if sub_tag.find('name') in methods or tag.tag == 'signals':
                        descr = sub_tag.find('description')
                        status.progresses[tag.tag].increment(len(descr.text.strip()) > 0)
            elif tag.tag in ['constants', 'members']:
                for sub_tag in list(tag):
                    status.progresses[tag.tag].increment(len(sub_tag.text.strip()) > 0)
            elif tag.tag in ['tutorials', 'demos']:
                pass  # Ignore those tags for now
            elif tag.tag in ['theme_items']:
                pass  # Ignore those tags, since they seem to lack description at all
            else:
                # Unknown tag: surface it so the schema drift is noticed.
                print(tag.tag, tag.attrib)
        return status
################################################################################
#                                  Arguments                                   #
################################################################################

# XML files found in directory arguments, and explicit class-name patterns.
input_file_list = []
input_class_list = []
merged_file = ""
for arg in sys.argv[1:]:
    try:
        if arg.startswith('--'):
            # Long options toggle the mapped single-letter flag.
            flags[long_flags[arg[2:]]] = not flags[long_flags[arg[2:]]]
        elif arg.startswith('-'):
            # Short flags may be bundled, e.g. "-cb".
            for f in arg[1:]:
                flags[f] = not flags[f]
        elif os.path.isdir(arg):
            for f in os.listdir(arg):
                if f.endswith('.xml'):
                    input_file_list.append(os.path.join(arg, f));
        else:
            # Anything else is a class-name pattern to filter by.
            input_class_list.append(arg)
    except KeyError:
        print("Unknown command line flag: " + arg)
        sys.exit(1)
if flags['i']:
    # Collapse the four per-category columns into one 'Items' column.
    for r in ['methods', 'constants', 'members', 'signals']:
        index = table_columns.index(r)
        del table_column_names[index]
        del table_columns[index]
    table_column_names.append('Items')
    table_columns.append('items')
if flags['o'] == (not flags['i']):
    table_column_names.append('Overall')
    table_columns.append('overall')
if flags['u']:
    table_column_names.append('Docs URL')
    table_columns.append('url')
################################################################################
#                                     Help                                     #
################################################################################

# No input files (or -h): print usage plus every flag with its current state.
if len(input_file_list) < 1 or flags['h']:
    if not flags['h']:
        print(color('section', 'Invalid usage') + ': Please specify a classes directory')
    print(color('section', 'Usage') + ': doc_status.py [flags] <classes_dir> [class names]')
    print('\t< and > signify required parameters, while [ and ] signify optional parameters.')
    print(color('section', 'Available flags') + ':')
    possible_synonym_list = list(long_flags)
    possible_synonym_list.sort()
    flag_list = list(flags)
    flag_list.sort()
    for flag in flag_list:
        # Collect the short flag plus all of its long synonyms.
        synonyms = [color('name', '-' + flag)]
        for synonym in possible_synonym_list:
            if long_flags[synonym] == flag:
                synonyms.append(color('name', '--' + synonym))
        print(('{synonyms} (Currently ' + color('state_' + ('on' if flags[flag] else 'off'), '{value}') + ')\n\t{description}').format(
            synonyms=', '.join(synonyms),
            value=('on' if flags[flag] else 'off'),
            description=flag_descriptions[flag]
        ))
    sys.exit(0)
################################################################################
#                               Parse class list                               #
################################################################################

class_names = []
classes = {}
for file in input_file_list:
    tree = ET.parse(file)
    doc = tree.getroot()
    if 'version' not in doc.attrib:
        print('Version missing from "doc"')
        sys.exit(255)
    version = doc.attrib['version']
    # First occurrence of a class name wins; later duplicates are skipped.
    if doc.attrib['name'] in class_names:
        continue
    class_names.append(doc.attrib['name'])
    classes[doc.attrib['name']] = doc
class_names.sort()
# No explicit class patterns given: match everything.
if len(input_class_list) < 1:
    input_class_list = ['*']
filtered_classes = set()
for pattern in input_class_list:
    filtered_classes |= set(fnmatch.filter(class_names, pattern))
filtered_classes = list(filtered_classes)
filtered_classes.sort()
################################################################################
#                               Make output table                              #
################################################################################

table = [table_column_names]
# Characters used to draw the table: border, corner-gap, rule, padding.
table_row_chars = '| - '
table_column_chars = '|'
total_status = ClassStatus('Total')
for cn in filtered_classes:
    c = classes[cn]
    validate_tag(c, 'class')
    status = ClassStatus.generate_for_class(c)
    total_status = total_status + status
    # Honour the only-bad / only-good / show-all filters.
    if (flags['b'] and status.is_ok()) or (flags['g'] and not status.is_ok()) or (not flags['a']):
        continue
    if flags['e'] and status.is_empty():
        continue
    out = status.make_output()
    row = []
    for column in table_columns:
        if column in out:
            row.append(out[column])
        else:
            row.append('')
    # The optional status comment is appended as an extra trailing cell.
    if 'comment' in out and out['comment'] != '':
        row.append(out['comment'])
    table.append(row)
################################################################################
#                               Print output table                             #
################################################################################

if len(table) == 1 and flags['a']:
    print(color('part_big_problem', 'No classes suitable for printing!'))
    sys.exit(0)
if len(table) > 2 or not flags['a']:
    # Append the aggregate 'Total = N' row.
    total_status.name = 'Total = {0}'.format(len(table) - 1)
    out = total_status.make_output()
    row = []
    for column in table_columns:
        if column in out:
            row.append(out[column])
        else:
            row.append('')
    table.append(row)
# Column widths ignore ANSI escapes so colouring does not skew the padding.
table_column_sizes = []
for row in table:
    for cell_i, cell in enumerate(row):
        if cell_i >= len(table_column_sizes):
            table_column_sizes.append(0)
        table_column_sizes[cell_i] = max(nonescape_len(cell), table_column_sizes[cell_i])
divider_string = table_row_chars[0]
for cell_i in range(len(table[0])):
    divider_string += table_row_chars[1] + table_row_chars[2] * (table_column_sizes[cell_i]) + table_row_chars[1] + table_row_chars[0]
print(divider_string)
for row_i, row in enumerate(table):
    row_string = table_column_chars
    for cell_i, cell in enumerate(row):
        padding_needed = table_column_sizes[cell_i] - nonescape_len(cell) + 2
        if cell_i == 0:
            # Left-align the name column.
            row_string += table_row_chars[3] + cell + table_row_chars[3] * (padding_needed - 1)
        else:
            # Centre every other column.
            row_string += table_row_chars[3] * int(math.floor(float(padding_needed) / 2)) + cell + table_row_chars[3] * int(math.ceil(float(padding_needed) / 2))
        row_string += table_column_chars
    print(row_string)
    # Rule under the header row and above the trailing total row.
    if row_i == 0 or row_i == len(table) - 2:
        print(divider_string)
print(divider_string)
if total_status.is_ok() and not flags['g']:
    print('All listed classes are ' + color('part_good', 'OK') + '!')
| FateAce/godot | doc/tools/doc_status.py | Python | mit | 16,238 |
from setuptools import setup
# Standard setuptools metadata for the popo_attribute_tracker package;
# zip_safe=False forces an unzipped (directory) install.
setup(
    name='popo_attribute_tracker',
    version='0.1',
    description='Simple Attribute Tracker for POPOs',
    url='https://github.com/farfanoide/popo_attribute_tracker',
    author='Ivan Karl',
    author_email='[email protected]',
    license='MIT',
    packages=['popo_attribute_tracker'],
    zip_safe=False
)
| farfanoide/popo_attribute_tracker | setup.py | Python | mit | 353 |
import unittest
class TestLDAPSyncrepl(unittest.TestCase):
    """Smoke test: the python-ldap syncrepl module must be importable."""

    def test_001_import_syncrepl(self):
        # Import inside the test so a missing module fails just this test.
        from ldap import syncrepl
| tpokorra/pykolab | tests/unit/test-007-ldap_syncrepl.py | Python | gpl-3.0 | 136 |
# coding=UTF-8
# Copyright (C) 2002-2006 Stephen Kennedy <[email protected]>
# Copyright (C) 2009-2013 Kai Willadsen <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import functools
import io
import os
import time
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gio
from gi.repository import Gdk
from gi.repository import Gtk
from meld.conf import _
from . import diffutil
from . import matchers
from . import meldbuffer
from . import melddoc
from . import merge
from . import misc
from . import patchdialog
from . import recent
from . import undo
from .ui import findbar
from .ui import gnomeglade
from meld.const import MODE_REPLACE, MODE_DELETE, MODE_INSERT
from meld.settings import bind_settings, meldsettings, settings
from .util.compat import text_type
from meld.sourceview import LanguageManager
class CachedSequenceMatcher(object):
    """Simple class for caching diff results, with LRU-based eviction

    Results from the SequenceMatcher are cached and timestamped, and
    subsequently evicted based on least-recent generation/usage. The LRU-based
    eviction is overly simplistic, but is okay for our usage pattern.
    """

    # Shared across all instances: a single worker pool for diff jobs.
    process_pool = None

    def __init__(self, algorithm="unified"):
        if self.process_pool is None:
            if os.name == "nt":
                # Windows cannot fork; fall back to a thread pool.
                CachedSequenceMatcher.process_pool = ThreadPool(None)
            else:
                # maxtasksperchild is new in Python 2.7; this is for 2.6 compat
                try:
                    CachedSequenceMatcher.process_pool = Pool(
                        None, matchers.init_worker, maxtasksperchild=1)
                except TypeError:
                    CachedSequenceMatcher.process_pool = Pool(
                        None, matchers.init_worker)
        # Maps (text1, textn) -> [opcodes, last-used timestamp].
        self.cache = {}
        self.algorithm = algorithm

    def match(self, text1, textn, cb):
        """Diff text1 against textn, invoking cb(opcodes) when available."""
        try:
            self.cache[(text1, textn)][1] = time.time()
            opcodes = self.cache[(text1, textn)][0]
            # FIXME: This idle should be totally unnecessary, and yet nothing
            # gets highlighted without it, even though everything in the
            # callback appears to run identically.
            GLib.idle_add(lambda: cb(opcodes))
        except KeyError:
            from bzrlib.patiencediff import PatienceSequenceMatcher
            # NOTE(review): `matcher` built below appears unused — the diff
            # is recomputed by matchers.matcher_worker in the pool, so the
            # "patience" algorithm choice may not take effect; confirm.
            if (self.algorithm == "patience"):
                matcher = PatienceSequenceMatcher(None, text1, textn)
            else:
                matcher = matchers.MyersSequenceMatcher(None, text1, textn)
            def inline_cb(opcodes):
                # Cache the result, then deliver it on the GLib main loop.
                self.cache[(text1, textn)] = [opcodes, time.time()]
                GLib.idle_add(lambda: cb(opcodes))
            self.process_pool.apply_async(matchers.matcher_worker,
                                          (text1, textn),
                                          callback=inline_cb)

    def clean(self, size_hint):
        """Clean the cache if necessary

        @param size_hint: the recommended minimum number of cache entries
        """
        if len(self.cache) < size_hint * 3:
            return
        # NOTE(review): sorting dict.items() in place is Python 2 only —
        # under Python 3 this raises AttributeError; verify target version.
        items = self.cache.items()
        items.sort(key=lambda it: it[1][1])
        # Evict everything but the most recently used 2 * size_hint entries.
        for item in items[:-size_hint * 2]:
            del self.cache[item[0]]
MASK_SHIFT, MASK_CTRL = 1, 2
class CursorDetails(object):
    """Mutable record of the current cursor/chunk context in a pane."""

    __slots__ = ("pane", "pos", "line", "offset", "chunk", "prev", "next",
                 "prev_conflict", "next_conflict")

    def __init__(self):
        # Every field starts cleared; callers fill these in lazily.
        for field in CursorDetails.__slots__:
            setattr(self, field, None)
class TaskEntry(object):
    """Lightweight positional record describing a file-load task."""

    __slots__ = ("filename", "file", "buf", "codec", "pane", "was_cr")

    def __init__(self, *args):
        # Positional args map onto slots in declaration order; any missing
        # trailing fields are simply left unset, as before.
        for name, value in zip(TaskEntry.__slots__, args):
            setattr(self, name, value)
class TextviewLineAnimation(object):
    """State for a colour-fade animation between two buffer marks."""

    __slots__ = ("start_mark", "end_mark", "start_rgba", "end_rgba",
                 "start_time", "duration")

    def __init__(self, mark0, mark1, rgba0, rgba1, duration):
        self.start_mark, self.end_mark = mark0, mark1
        self.start_rgba, self.end_rgba = rgba0, rgba1
        # Monotonic timestamp the fade is measured from.
        self.start_time = GLib.get_monotonic_time()
        self.duration = duration
class FileDiff(melddoc.MeldDoc, gnomeglade.Component):
"""Two or three way comparison of text files"""
__gtype_name__ = "FileDiff"
__gsettings_bindings__ = (
('highlight-current-line', 'highlight-current-line'),
('ignore-blank-lines', 'ignore-blank-lines'),
)
highlight_current_line = GObject.property(type=bool, default=False)
ignore_blank_lines = GObject.property(
type=bool,
nick="Ignore blank lines",
blurb="Whether to ignore blank lines when comparing file contents",
default=False,
)
differ = diffutil.Differ
keylookup = {
Gdk.KEY_Shift_L: MASK_SHIFT,
Gdk.KEY_Shift_R: MASK_SHIFT,
Gdk.KEY_Control_L: MASK_CTRL,
Gdk.KEY_Control_R: MASK_CTRL,
}
# Identifiers for MsgArea messages
(MSG_SAME, MSG_SLOW_HIGHLIGHT, MSG_SYNCPOINTS) = list(range(3))
__gsignals__ = {
'next-conflict-changed': (GObject.SignalFlags.RUN_FIRST, None, (bool, bool)),
'action-mode-changed': (GObject.SignalFlags.RUN_FIRST, None, (int,)),
}
def __init__(self, num_panes, algorithm="unified"):
"""Start up an filediff with num_panes empty contents.
"""
self.algorithm=algorithm
melddoc.MeldDoc.__init__(self)
gnomeglade.Component.__init__(self, "filediff.ui", "filediff")
bind_settings(self)
widget_lists = [
"diffmap", "file_save_button", "file_toolbar", "fileentry",
"linkmap", "msgarea_mgr", "readonlytoggle",
"scrolledwindow", "selector_hbox", "textview", "vbox",
"dummy_toolbar_linkmap", "filelabel_toolitem", "filelabel",
"fileentry_toolitem", "dummy_toolbar_diffmap"
]
self.map_widgets_into_lists(widget_lists)
# This SizeGroup isn't actually necessary for FileDiff; it's for
# handling non-homogenous selectors in FileComp. It's also fragile.
column_sizes = Gtk.SizeGroup(mode=Gtk.SizeGroupMode.HORIZONTAL)
column_sizes.set_ignore_hidden(True)
for widget in self.selector_hbox:
column_sizes.add_widget(widget)
self.warned_bad_comparison = False
for v in self.textview:
buf = meldbuffer.MeldBuffer()
buf.connect('begin_user_action',
self.on_textbuffer_begin_user_action)
buf.connect('end_user_action', self.on_textbuffer_end_user_action)
v.set_buffer(buf)
buf.data.connect('file-changed', self.notify_file_changed)
self._keymask = 0
self.load_font()
self.meta = {}
self.deleted_lines_pending = -1
self.textview_overwrite = 0
self.focus_pane = None
self.textview_overwrite_handlers = [ t.connect("toggle-overwrite", self.on_textview_toggle_overwrite) for t in self.textview ]
self.textbuffer = [v.get_buffer() for v in self.textview]
self.buffer_texts = [meldbuffer.BufferLines(b) for b in self.textbuffer]
self.undosequence = undo.UndoSequence()
self.text_filters = []
self.create_text_filters()
self.settings_handlers = [
meldsettings.connect("text-filters-changed",
self.on_text_filters_changed)
]
self.buffer_filtered = [meldbuffer.BufferLines(b, self._filter_text)
for b in self.textbuffer]
for (i, w) in enumerate(self.scrolledwindow):
w.get_vadjustment().connect("value-changed", self._sync_vscroll, i)
w.get_hadjustment().connect("value-changed", self._sync_hscroll)
self._connect_buffer_handlers()
self._sync_vscroll_lock = False
self._sync_hscroll_lock = False
self._scroll_lock = False
self.linediffer = self.differ(self.algorithm)
self.force_highlight = False
self.syncpoints = []
self.in_nested_textview_gutter_expose = False
self._cached_match = CachedSequenceMatcher()
self.anim_source_id = [None for buf in self.textbuffer]
self.animating_chunks = [[] for buf in self.textbuffer]
for buf in self.textbuffer:
buf.create_tag("inline")
buf.connect("notify::has-selection",
self.update_text_actions_sensitivity)
actions = (
("MakePatch", None, _("Format as Patch..."), None,
_("Create a patch using differences between files"),
self.make_patch),
("SaveAll", None, _("Save A_ll"), "<Ctrl><Shift>L",
_("Save all files in the current comparison"),
self.on_save_all_activate),
("Revert", Gtk.STOCK_REVERT_TO_SAVED, None, None,
_("Revert files to their saved versions"),
self.on_revert_activate),
("SplitAdd", None, _("Add Synchronization Point"), None,
_("Add a manual point for synchronization of changes between "
"files"),
self.add_sync_point),
("SplitClear", None, _("Clear Synchronization Points"), None,
_("Clear manual change sychronization points"),
self.clear_sync_points),
("PrevConflict", None, _("Previous Conflict"), "<Ctrl>I",
_("Go to the previous conflict"),
lambda x: self.on_next_conflict(Gdk.ScrollDirection.UP)),
("NextConflict", None, _("Next Conflict"), "<Ctrl>K",
_("Go to the next conflict"),
lambda x: self.on_next_conflict(Gdk.ScrollDirection.DOWN)),
("PushLeft", Gtk.STOCK_GO_BACK, _("Push to Left"), "<Alt>Left",
_("Push current change to the left"),
lambda x: self.push_change(-1)),
("PushRight", Gtk.STOCK_GO_FORWARD,
_("Push to Right"), "<Alt>Right",
_("Push current change to the right"),
lambda x: self.push_change(1)),
# FIXME: using LAST and FIRST is terrible and unreliable icon abuse
("PullLeft", Gtk.STOCK_GOTO_LAST,
_("Pull from Left"), "<Alt><Shift>Right",
_("Pull change from the left"),
lambda x: self.pull_change(-1)),
("PullRight", Gtk.STOCK_GOTO_FIRST,
_("Pull from Right"), "<Alt><Shift>Left",
_("Pull change from the right"),
lambda x: self.pull_change(1)),
("CopyLeftUp", None, _("Copy Above Left"), "<Alt>bracketleft",
_("Copy change above the left chunk"),
lambda x: self.copy_change(-1, -1)),
("CopyLeftDown", None, _("Copy Below Left"), "<Alt>semicolon",
_("Copy change below the left chunk"),
lambda x: self.copy_change(-1, 1)),
("CopyRightUp", None, _("Copy Above Right"), "<Alt>bracketright",
_("Copy change above the right chunk"),
lambda x: self.copy_change(1, -1)),
("CopyRightDown", None, _("Copy Below Right"), "<Alt>quoteright",
_("Copy change below the right chunk"),
lambda x: self.copy_change(1, 1)),
("Delete", Gtk.STOCK_DELETE, _("Delete"), "<Alt>Delete",
_("Delete change"),
self.delete_change),
("MergeFromLeft", None, _("Merge All from Left"), None,
_("Merge all non-conflicting changes from the left"),
lambda x: self.pull_all_non_conflicting_changes(-1)),
("MergeFromRight", None, _("Merge All from Right"), None,
_("Merge all non-conflicting changes from the right"),
lambda x: self.pull_all_non_conflicting_changes(1)),
("MergeAll", None, _("Merge All"), None,
_("Merge all non-conflicting changes from left and right "
"panes"),
lambda x: self.merge_all_non_conflicting_changes()),
("CycleDocuments", None,
_("Cycle Through Documents"), "<control>Escape",
_("Move keyboard focus to the next document in this "
"comparison"),
self.action_cycle_documents),
)
toggle_actions = (
("LockScrolling", None, _("Lock Scrolling"), None,
_("Lock scrolling of all panes"),
self.on_action_lock_scrolling_toggled, True),
)
self.ui_file = gnomeglade.ui_file("filediff-ui.xml")
self.actiongroup = Gtk.ActionGroup(name="FilediffPopupActions")
self.actiongroup.set_translation_domain("meld")
self.actiongroup.add_actions(actions)
self.actiongroup.add_toggle_actions(toggle_actions)
self.findbar = findbar.FindBar(self.grid)
self.grid.attach(self.findbar.widget, 1, 2, 5, 1)
self.widget.ensure_style()
self.on_style_updated(self.widget)
self.widget.connect("style-updated", self.on_style_updated)
self.set_num_panes(num_panes)
self.cursor = CursorDetails()
self.connect("current-diff-changed", self.on_current_diff_changed)
for t in self.textview:
t.connect("focus-in-event", self.on_current_diff_changed)
t.connect("focus-out-event", self.on_current_diff_changed)
self.linediffer.connect("diffs-changed", self.on_diffs_changed)
self.undosequence.connect("checkpointed", self.on_undo_checkpointed)
self.connect("next-conflict-changed", self.on_next_conflict_changed)
for diffmap in self.diffmap:
self.linediffer.connect('diffs-changed', diffmap.on_diffs_changed)
overwrite_label = Gtk.Label()
overwrite_label.show()
cursor_label = Gtk.Label()
cursor_label.show()
self.status_info_labels = [overwrite_label, cursor_label]
self.statusbar.set_info_box(self.status_info_labels)
# Prototype implementation
from meld.gutterrendererchunk import GutterRendererChunkAction
for pane, t in enumerate(self.textview):
# FIXME: set_num_panes will break this good
if pane == 0 or (pane == 1 and self.num_panes == 3):
window = Gtk.TextWindowType.RIGHT
views = [self.textview[pane], self.textview[pane + 1]]
renderer = GutterRendererChunkAction(pane, pane + 1, views, self, self.linediffer)
gutter = t.get_gutter(window)
gutter.insert(renderer, 10)
if pane in (1, 2):
window = Gtk.TextWindowType.LEFT
views = [self.textview[pane], self.textview[pane - 1]]
renderer = GutterRendererChunkAction(pane, pane - 1, views, self, self.linediffer)
gutter = t.get_gutter(window)
gutter.insert(renderer, 10)
self.connect("notify::ignore-blank-lines", self.refresh_comparison)
meldsettings.connect('changed', self.on_setting_changed)
def get_keymask(self):
    """Return the current modifier-key bitmask (see set_keymask)."""
    return self._keymask

def set_keymask(self, value):
    """Store the modifier bitmask and emit the matching action mode.

    Shift selects delete mode and Ctrl selects insert mode; with
    neither held, the default is replace mode.
    """
    if value & MASK_SHIFT:
        mode = MODE_DELETE
    elif value & MASK_CTRL:
        mode = MODE_INSERT
    else:
        mode = MODE_REPLACE
    self._keymask = value
    self.emit("action-mode-changed", mode)

# Property so that assigning self.keymask triggers the mode signal.
keymask = property(get_keymask, set_keymask)
def on_key_event(self, object, event):
    """Track modifier press/release to keep self.keymask up to date.

    Escape on key-press also dismisses the find bar.
    """
    keymap = Gdk.Keymap.get_default()
    # Translate the hardware keycode with an empty modifier state, so we
    # get the base keyval independent of currently held modifiers.
    ok, keyval, group, lvl, consumed = keymap.translate_keyboard_state(
        event.hardware_keycode, 0, event.group)
    mod_key = self.keylookup.get(keyval, 0)
    if event.type == Gdk.EventType.KEY_PRESS:
        self.keymask |= mod_key
        if event.keyval == Gdk.KEY_Escape:
            self.findbar.hide()
    elif event.type == Gdk.EventType.KEY_RELEASE:
        self.keymask &= ~mod_key
def on_style_updated(self, widget):
    """Re-read themed colours and push them to buffers, maps and views."""
    style = widget.get_style_context()

    def lookup(name, default):
        # Fall back to a hard-coded colour when the theme doesn't define
        # the named one.
        found, colour = style.lookup_color(name)
        if not found:
            colour = Gdk.RGBA()
            colour.parse(default)
        return colour

    for buf in self.textbuffer:
        tag = buf.get_tag_table().lookup("inline")
        tag.props.background_rgba = lookup("inline-bg", "LightSteelBlue2")
    # NOTE(review): "delete" reuses the "insert" colour keys below —
    # presumably intentional (deletions drawn like insertions on the
    # opposite pane); confirm before "fixing".
    self.fill_colors = {"insert" : lookup("insert-bg", "DarkSeaGreen1"),
                        "delete" : lookup("insert-bg", "DarkSeaGreen1"),
                        "conflict": lookup("conflict-bg", "Pink"),
                        "replace" : lookup("replace-bg", "#ddeeff"),
                        "current-chunk-highlight":
                            lookup("current-chunk-highlight", '#ffffff')}
    self.line_colors = {"insert" : lookup("insert-outline", "#77f077"),
                        "delete" : lookup("insert-outline", "#77f077"),
                        "conflict": lookup("conflict-outline", "#f0768b"),
                        "replace" : lookup("replace-outline", "#8bbff3")}
    self.highlight_color = lookup("current-line-highlight", "#ffff00")
    self.syncpoint_color = lookup("syncpoint-outline", "#555555")
    for associated in self.diffmap + self.linkmap:
        associated.set_color_scheme([self.fill_colors, self.line_colors])
    self.queue_draw()
def on_focus_change(self):
    """Reset the modifier-key mask when toplevel focus moves away."""
    self.keymask = 0
def on_text_filters_changed(self, app):
    """Re-snapshot the text filters; redo the comparison if they changed."""
    if self.create_text_filters():
        self.refresh_comparison()
def create_text_filters(self):
    """Snapshot the global text filters into this comparison.

    In contrast to file filters, the ordering of text filters can
    matter, so the active-filter comparison is order-sensitive.
    Returns True when the active filter strings changed.
    """
    def active_strings(filters):
        return [f.filter_string for f in filters if f.active]

    changed = (active_strings(self.text_filters) !=
               active_strings(meldsettings.text_filters))
    self.text_filters = [copy.copy(f) for f in meldsettings.text_filters]
    return changed
def _disconnect_buffer_handlers(self):
    """Make every pane read-only and detach our buffer signal handlers."""
    for view in self.textview:
        view.set_editable(0)
    for buf in self.textbuffer:
        # _connect_buffer_handlers must have run first.
        assert hasattr(buf, "handlers")
        for handler_id in buf.handlers:
            buf.disconnect(handler_id)
def _connect_buffer_handlers(self):
    """Restore pane editability and reattach buffer change handlers."""
    for view, buf in zip(self.textview, self.textbuffer):
        view.set_editable(buf.data.editable)
    # (signal, callback, connect-after?) in the order the ids are stored.
    handler_specs = (
        ("insert-text", self.on_text_insert_text, False),
        ("delete-range", self.on_text_delete_range, False),
        ("insert-text", self.after_text_insert_text, True),
        ("delete-range", self.after_text_delete_range, True),
        ("notify::cursor-position", self.on_cursor_position_changed, False),
    )
    for buf in self.textbuffer:
        ids = []
        for signal, callback, after in handler_specs:
            connect = buf.connect_after if after else buf.connect
            ids.append(connect(signal, callback))
        buf.handlers = tuple(ids)
# Abbreviations for insert and overwrite that fit in the status bar;
# indexed by the boolean self.textview_overwrite.
_insert_overwrite_text = (_("INS"), _("OVR"))
# Abbreviation for line, column so that it will fit in the status bar
_line_column_text = _("Ln %i, Col %i")
def on_cursor_position_changed(self, buf, pspec, force=False):
    """Track the cursor, update the status bar, and emit navigation
    signals when the cursor crosses chunk/conflict boundaries.

    force re-runs every update even when pane and position are
    unchanged (used on focus changes and after edits).
    """
    pane = self.textbuffer.index(buf)
    pos = buf.props.cursor_position
    if pane == self.cursor.pane and pos == self.cursor.pos and not force:
        return
    self.cursor.pane, self.cursor.pos = pane, pos

    cursor_it = buf.get_iter_at_offset(pos)
    offset = cursor_it.get_line_offset()
    line = cursor_it.get_line()

    # Status bar: INS/OVR indicator and 1-based line/column.
    insert_overwrite = self._insert_overwrite_text[self.textview_overwrite]
    line_column = self._line_column_text % (line + 1, offset + 1)
    self.status_info_labels[0].set_text(insert_overwrite)
    self.status_info_labels[1].set_text(line_column)

    if line != self.cursor.line or force:
        chunk, prev, next_ = self.linediffer.locate_chunk(pane, line)
        if chunk != self.cursor.chunk or force:
            self.cursor.chunk = chunk
            self.emit("current-diff-changed")
        if prev != self.cursor.prev or next_ != self.cursor.next or force:
            self.emit("next-diff-changed", prev is not None,
                      next_ is not None)

        # Locate the nearest conflicts around the cursor; conflicts
        # iterate in order, so the first one at/after next_ wins.
        prev_conflict, next_conflict = None, None
        for conflict in self.linediffer.conflicts:
            if prev is not None and conflict <= prev:
                prev_conflict = conflict
            if next_ is not None and conflict >= next_:
                next_conflict = conflict
                break
        if prev_conflict != self.cursor.prev_conflict or \
                next_conflict != self.cursor.next_conflict or force:
            self.emit("next-conflict-changed", prev_conflict is not None,
                      next_conflict is not None)

        self.cursor.prev, self.cursor.next = prev, next_
        self.cursor.prev_conflict = prev_conflict
        self.cursor.next_conflict = next_conflict
    self.cursor.line, self.cursor.offset = line, offset
def on_current_diff_changed(self, widget, *args):
    """Recompute sensitivity of the chunk push/pull/copy/delete actions
    for the chunk under the cursor."""
    pane = self._get_focused_pane()
    if pane != -1:
        # While this *should* be redundant, it's possible for focus pane
        # and cursor pane to be different in several situations.
        pane = self.cursor.pane
        chunk_id = self.cursor.chunk
    # Short-circuit protects chunk_id, which is unbound when pane == -1.
    if pane == -1 or chunk_id is None:
        push_left, push_right, pull_left, pull_right, delete, \
            copy_left, copy_right = (False,) * 7
    else:
        push_left, push_right, pull_left, pull_right, delete, \
            copy_left, copy_right = (True,) * 7

        # Push and Delete are active if the current pane has something to
        # act on, and the target pane exists and is editable. Pull is
        # sensitive if the source pane has something to get, and the
        # current pane is editable. Copy actions are sensitive if the
        # conditions for push are met, *and* there is some content in the
        # target pane.
        editable = self.textview[pane].get_editable()
        editable_left = pane > 0 and self.textview[pane - 1].get_editable()
        editable_right = pane < self.num_panes - 1 and \
            self.textview[pane + 1].get_editable()
        if pane == 0 or pane == 2:
            # Outer panes: a single chunk against the middle pane.
            # chunk[1] == chunk[2] means no lines on our side (insert);
            # chunk[3] == chunk[4] means no lines on the other side.
            chunk = self.linediffer.get_chunk(chunk_id, pane)
            insert_chunk = chunk[1] == chunk[2]
            delete_chunk = chunk[3] == chunk[4]
            push_left = editable_left and not insert_chunk
            push_right = editable_right and not insert_chunk
            pull_left = pane == 2 and editable and not delete_chunk
            pull_right = pane == 0 and editable and not delete_chunk
            delete = editable and not insert_chunk
            copy_left = push_left and not delete_chunk
            copy_right = push_right and not delete_chunk
        elif pane == 1:
            # Middle pane: consider chunks against both neighbours.
            chunk0 = self.linediffer.get_chunk(chunk_id, 1, 0)
            chunk2 = None
            if self.num_panes == 3:
                chunk2 = self.linediffer.get_chunk(chunk_id, 1, 2)
            left_mid_exists = chunk0 is not None and chunk0[1] != chunk0[2]
            left_exists = chunk0 is not None and chunk0[3] != chunk0[4]
            right_mid_exists = chunk2 is not None and chunk2[1] != chunk2[2]
            right_exists = chunk2 is not None and chunk2[3] != chunk2[4]
            push_left = editable_left and left_mid_exists
            push_right = editable_right and right_mid_exists
            pull_left = editable and left_exists
            pull_right = editable and right_exists
            delete = editable and (left_mid_exists or right_mid_exists)
            copy_left = push_left and left_exists
            copy_right = push_right and right_exists
    self.actiongroup.get_action("PushLeft").set_sensitive(push_left)
    self.actiongroup.get_action("PushRight").set_sensitive(push_right)
    self.actiongroup.get_action("PullLeft").set_sensitive(pull_left)
    self.actiongroup.get_action("PullRight").set_sensitive(pull_right)
    self.actiongroup.get_action("Delete").set_sensitive(delete)
    self.actiongroup.get_action("CopyLeftUp").set_sensitive(copy_left)
    self.actiongroup.get_action("CopyLeftDown").set_sensitive(copy_left)
    self.actiongroup.get_action("CopyRightUp").set_sensitive(copy_right)
    self.actiongroup.get_action("CopyRightDown").set_sensitive(copy_right)
    # FIXME: don't queue_draw() on everything... just on what changed
    self.queue_draw()
def on_next_conflict_changed(self, doc, have_prev, have_next):
    """Update Prev/Next conflict action sensitivity."""
    for action_name, sensitive in (("PrevConflict", have_prev),
                                   ("NextConflict", have_next)):
        self.actiongroup.get_action(action_name).set_sensitive(sensitive)
def on_next_conflict(self, direction):
    """Move the cursor to the next or previous conflict chunk."""
    if direction == Gdk.ScrollDirection.DOWN:
        target = self.cursor.next_conflict
    else: # direction == Gdk.ScrollDirection.UP
        target = self.cursor.prev_conflict
    if target is None:
        return
    buf = self.textbuffer[self.cursor.pane]
    chunk = self.linediffer.get_chunk(target, self.cursor.pane)
    # chunk[1] is the chunk's start line in this pane.
    buf.place_cursor(buf.get_iter_at_line(chunk[1]))
    self.textview[self.cursor.pane].scroll_to_mark(
        buf.get_insert(), 0.1, True, 0.5, 0.5)
def push_change(self, direction):
    """Replace the neighbouring pane's chunk with the focused pane's.

    direction is -1 (push left) or +1 (push right).
    """
    source_pane = self._get_focused_pane()
    target_pane = source_pane + direction
    chunk = self.linediffer.get_chunk(
        self.cursor.chunk, source_pane, target_pane)
    assert source_pane != -1 and self.cursor.chunk is not None
    assert target_pane in (0, 1, 2)
    assert chunk is not None
    self.replace_chunk(source_pane, target_pane, chunk)
def pull_change(self, direction):
    """Replace the focused pane's chunk with a neighbouring pane's.

    direction is -1 (pull from the left) or +1 (pull from the right).
    """
    target_pane = self._get_focused_pane()
    source_pane = target_pane + direction
    chunk = self.linediffer.get_chunk(
        self.cursor.chunk, source_pane, target_pane)
    assert target_pane != -1 and self.cursor.chunk is not None
    assert source_pane in (0, 1, 2)
    assert chunk is not None
    self.replace_chunk(source_pane, target_pane, chunk)
def copy_change(self, direction, copy_direction):
    """Copy the current chunk into a neighbouring pane, above or below.

    direction selects the target pane relative to the focused one
    (-1 left, +1 right); a negative copy_direction places the copy
    above the target chunk, otherwise below.
    """
    src = self._get_focused_pane()
    dst = src + direction
    chunk = self.linediffer.get_chunk(self.cursor.chunk, src, dst)
    assert src != -1 and self.cursor.chunk is not None
    assert dst in (0, 1, 2)
    assert chunk is not None
    # Idiom fix: direct boolean instead of "True if ... else False".
    copy_up = copy_direction < 0
    self.copy_chunk(src, dst, chunk, copy_up)
def pull_all_non_conflicting_changes(self, direction):
    """Merge every non-conflicting change from a neighbouring pane
    into the focused pane. direction is -1 (left) or +1 (right).
    """
    assert direction in (-1, 1)
    dst = self._get_focused_pane()
    src = dst + direction
    assert src in range(self.num_panes)
    merger = merge.Merger(self.algorithm)
    merger.differ = self.linediffer
    merger.texts = self.buffer_texts
    # merge_2_files is a generator; drain it, keeping the final result.
    for mergedfile in merger.merge_2_files(src, dst):
        pass
    # Suppress scroll syncing while the buffer is replaced wholesale,
    # and group the replacement into one undo step.
    self._sync_vscroll_lock = True
    self.on_textbuffer_begin_user_action()
    self.textbuffer[dst].set_text(mergedfile)
    self.on_textbuffer_end_user_action()

    def resync():
        # Re-enable and re-anchor scroll syncing after the rediff runs.
        self._sync_vscroll_lock = False
        self._sync_vscroll(self.scrolledwindow[src].get_vadjustment(), src)
    self.scheduler.add_task(resync)
def merge_all_non_conflicting_changes(self):
    """Merge every non-conflicting change from both outer panes into
    the middle pane of a three-way comparison."""
    dst = 1
    merger = merge.Merger(self.algorithm)
    merger.differ = self.linediffer
    merger.texts = self.buffer_texts
    # merge_3_files is a generator; drain it, keeping the final result.
    for mergedfile in merger.merge_3_files(False):
        pass
    # As in pull_all_non_conflicting_changes: lock scroll sync and
    # group the wholesale replacement into a single undo step.
    self._sync_vscroll_lock = True
    self.on_textbuffer_begin_user_action()
    self.textbuffer[dst].set_text(mergedfile)
    self.on_textbuffer_end_user_action()

    def resync():
        self._sync_vscroll_lock = False
        self._sync_vscroll(self.scrolledwindow[0].get_vadjustment(), 0)
    self.scheduler.add_task(resync)
def delete_change(self, widget):
    """Remove the chunk under the cursor from the focused pane."""
    focused = self._get_focused_pane()
    chunk = self.linediffer.get_chunk(self.cursor.chunk, focused)
    assert focused != -1 and self.cursor.chunk is not None
    assert chunk is not None
    self.delete_chunk(focused, chunk)
def _synth_chunk(self, pane0, pane1, line):
    """Returns the Same chunk that would exist at
    the given location if we didn't remove Same chunks"""

    # This method is a hack around our existing diffutil data structures;
    # getting rid of the Same chunk removal is difficult, as several places
    # have baked in the assumption of only being given changed blocks.

    buf0, buf1 = self.textbuffer[pane0], self.textbuffer[pane1]
    # Default bounds: the whole of both buffers.
    start0, end0 = 0, buf0.get_line_count() - 1
    start1, end1 = 0, buf1.get_line_count() - 1

    # This hack is required when pane0's prev/next chunk doesn't exist
    # (i.e., is Same) between pane0 and pane1.
    prev_chunk0, prev_chunk1, next_chunk0, next_chunk1 = (None,) * 4
    _, prev, next_ = self.linediffer.locate_chunk(pane0, line)
    if prev is not None:
        # Walk backwards to the nearest chunk present in both panes;
        # its end bounds the synthetic Same chunk's start.
        while prev >= 0:
            prev_chunk0 = self.linediffer.get_chunk(prev, pane0, pane1)
            prev_chunk1 = self.linediffer.get_chunk(prev, pane1, pane0)
            if None not in (prev_chunk0, prev_chunk1):
                start0 = prev_chunk0[2]
                start1 = prev_chunk1[2]
                break
            prev -= 1

    if next_ is not None:
        # Walk forwards likewise; its start bounds the synthetic end.
        while next_ < self.linediffer.diff_count():
            next_chunk0 = self.linediffer.get_chunk(next_, pane0, pane1)
            next_chunk1 = self.linediffer.get_chunk(next_, pane1, pane0)
            if None not in (next_chunk0, next_chunk1):
                end0 = next_chunk0[1]
                end1 = next_chunk1[1]
                break
            next_ += 1

    return "Same", start0, end0, start1, end1
def _corresponding_chunk_line(self, chunk, line, pane, new_pane):
    """Approximate the line in new_pane corresponding to *line* in
    *pane*, given the cursor's chunk (may be None for a Same region).
    """
    old_buf, new_buf = self.textbuffer[pane], self.textbuffer[new_pane]

    # Special-case cross-pane jumps: route 0<->2 via the middle pane.
    if (pane == 0 and new_pane == 2) or (pane == 2 and new_pane == 0):
        proxy = self._corresponding_chunk_line(chunk, line, pane, 1)
        return self._corresponding_chunk_line(chunk, proxy, 1, new_pane)

    # Either we are currently in a identifiable chunk, or we are in a Same
    # chunk; if we establish the start/end of that chunk in both panes, we
    # can figure out what our new offset should be.
    cur_chunk = None
    if chunk is not None:
        cur_chunk = self.linediffer.get_chunk(chunk, pane, new_pane)
    if cur_chunk is None:
        cur_chunk = self._synth_chunk(pane, new_pane, line)
    cur_start, cur_end, new_start, new_end = cur_chunk[1:5]

    # If the new buffer's current cursor is already in the correct chunk,
    # assume that we have in-progress editing, and don't move it.
    cursor_it = new_buf.get_iter_at_mark(new_buf.get_insert())
    cursor_line = cursor_it.get_line()
    cursor_chunk, _, _ = self.linediffer.locate_chunk(new_pane, cursor_line)
    if cursor_chunk is not None:
        already_in_chunk = cursor_chunk == chunk
    else:
        # Synthetic Same chunk: compare its bounds in the target pane.
        cursor_chunk = self._synth_chunk(pane, new_pane, cursor_line)
        already_in_chunk = cursor_chunk[3] == new_start and \
            cursor_chunk[4] == new_end

    if already_in_chunk:
        new_line = cursor_line
    else:
        # Guess where to put the cursor: in the same chunk, at about the
        # same place within the chunk, calculated proportionally by line.
        # Insert chunks and one-line chunks are placed at the top.
        if cur_end == cur_start:
            chunk_offset = 0.0
        else:
            chunk_offset = (line - cur_start) / float(cur_end - cur_start)
        new_line = new_start + int(chunk_offset * (new_end - new_start))

    return new_line
def action_cycle_documents(self, widget):
    """Move keyboard focus to the next pane, placing the cursor at
    roughly the corresponding location."""
    current = self._get_focused_pane()
    target = (current + 1) % self.num_panes
    target_line = self._corresponding_chunk_line(
        self.cursor.chunk, self.cursor.line, current, target)
    target_buf = self.textbuffer[target]
    self.textview[target].grab_focus()
    target_buf.place_cursor(target_buf.get_iter_at_line(target_line))
    self.textview[target].scroll_to_mark(
        target_buf.get_insert(), 0.1, True, 0.5, 0.5)
def _set_external_action_sensitivity(self):
    """Enable the "Open Externally" action only when a pane has focus."""
    have_file = self.focus_pane is not None
    try:
        self.main_actiongroup.get_action("OpenExternal").set_sensitive(
            have_file)
    except AttributeError:
        # Best effort: presumably main_actiongroup may be unset at this
        # point in the widget's life cycle — TODO confirm.
        pass
def on_textview_focus_in_event(self, view, event):
    """Record the newly focused pane and refresh focus-dependent UI."""
    self.focus_pane = view
    self.findbar.textview = view
    self.on_cursor_position_changed(view.get_buffer(), None, True)
    self._set_save_action_sensitivity()
    self._set_merge_action_sensitivity()
    self._set_external_action_sensitivity()
    self.update_text_actions_sensitivity()

def on_textview_focus_out_event(self, view, event):
    # focus_pane is not cleared here; it keeps pointing at the last
    # focused view.
    self._set_merge_action_sensitivity()
    self._set_external_action_sensitivity()
def _after_text_modified(self, buffer, startline, sizechange):
    """Resequence diffs and refresh cursor state after a buffer edit.

    sizechange is the net change in line count (negative for deletes).
    """
    if self.num_panes > 1:
        pane = self.textbuffer.index(buffer)
        # With user-defined syncpoints, incremental resequencing is
        # skipped.
        if not self.linediffer.syncpoints:
            self.linediffer.change_sequence(pane, startline, sizechange,
                                            self.buffer_filtered)
        # FIXME: diff-changed signal for the current buffer would be cleaner
        focused_pane = self._get_focused_pane()
        if focused_pane != -1:
            self.on_cursor_position_changed(self.textbuffer[focused_pane],
                                            None, True)
        self.queue_draw()
def _filter_text(self, txt):
    """Run all active text filters over txt and return the result.

    Filters must not change the line count; a match spanning a newline
    trips the assertion below and triggers a one-time warning dialog.
    """
    def killit(m):
        assert m.group().count("\n") == 0
        if len(m.groups()):
            # Strip only the captured groups, keeping surrounding text.
            s = m.group()
            for g in m.groups():
                if g:
                    s = s.replace(g,"")
            return s
        else:
            return ""
    try:
        for filt in self.text_filters:
            if filt.active:
                txt = filt.filter.sub(killit, txt)
    except AssertionError:
        # Python 2: `filt` deliberately leaks from the loop above, naming
        # the offending filter in the warning.
        if not self.warned_bad_comparison:
            misc.run_dialog(_("Filter '%s' changed the number of lines in the file. "
                "Comparison will be incorrect. See the user manual for more details.") % filt.label)
            self.warned_bad_comparison = True
    return txt
def after_text_insert_text(self, buf, it, newtext, textlen):
    """Post-insert hook: compute lines added and propagate the change.

    Consumes the "insertion-start" mark placed by on_text_insert_text.
    """
    mark = buf.get_mark("insertion-start")
    first_line = buf.get_iter_at_mark(mark).get_line()
    buf.delete_mark(mark)
    self._after_text_modified(buf, first_line, it.get_line() - first_line)
def after_text_delete_range(self, buffer, it0, it1):
    """Post-delete hook: report lines removed, as counted pre-delete.

    deleted_lines_pending is set by on_text_delete_range before the
    buffer actually changes; -1 means no deletion is in flight.
    """
    starting_at = it0.get_line()
    assert self.deleted_lines_pending != -1
    self._after_text_modified(buffer, starting_at, -self.deleted_lines_pending)
    self.deleted_lines_pending = -1
def load_font(self):
    """Apply the configured font and recompute per-line pixel height."""
    context = self.textview0.get_pango_context()
    metrics = context.get_metrics(meldsettings.font,
                                  context.get_language())
    line_height_points = metrics.get_ascent() + metrics.get_descent()
    # Pango metrics are in 1024ths of a point (Pango.SCALE).
    self.pixels_per_line = line_height_points // 1024
    for i in range(3):
        self.textview[i].override_font(meldsettings.font)
    for i in range(2):
        self.linkmap[i].queue_draw()

def on_setting_changed(self, settings, key):
    """React to meldsettings changes; only the font matters here."""
    if key == 'font':
        self.load_font()
def check_save_modified(self, label=None):
    """Prompt to save any modified buffers; return the dialog response.

    Returns Gtk.ResponseType.OK when it is safe to proceed (everything
    chosen was saved, or nothing was modified), CANCEL otherwise.
    After a successful merge save, may also offer to mark the version
    control conflict as resolved.
    """
    response = Gtk.ResponseType.OK
    modified = [b.data.modified for b in self.textbuffer]
    if True in modified:
        dialog = gnomeglade.Component("filediff.ui", "check_save_dialog")
        dialog.widget.set_transient_for(self.widget.get_toplevel())
        if label:
            dialog.widget.props.text = label
        # FIXME: Should be packed into dialog.widget.get_message_area(),
        # but this is unbound on currently required PyGTK.
        buttons = []
        # One checkbox per pane, pre-checked for the modified buffers.
        for i in range(self.num_panes):
            button = Gtk.CheckButton(self.textbuffer[i].data.label)
            button.set_use_underline(False)
            button.set_sensitive(modified[i])
            button.set_active(modified[i])
            dialog.extra_vbox.pack_start(button, expand=True, fill=True,
                                         padding=0)
            buttons.append(button)
        dialog.extra_vbox.show_all()
        response = dialog.widget.run()
        try_save = [b.get_active() for b in buttons]
        dialog.widget.destroy()
        if response == Gtk.ResponseType.OK:
            for i in range(self.num_panes):
                if try_save[i]:
                    # A failed save cancels the whole operation.
                    if not self.save_file(i):
                        return Gtk.ResponseType.CANCEL
        elif response == Gtk.ResponseType.DELETE_EVENT:
            # Closing the dialog behaves like Cancel.
            response = Gtk.ResponseType.CANCEL
    if response == Gtk.ResponseType.OK and self.meta:
        # Optionally tell the VC tool the conflict is now resolved.
        parent = self.meta.get('parent', None)
        saved = self.meta.get('middle_saved', False)
        prompt_resolve = self.meta.get('prompt_resolve', False)
        if prompt_resolve and saved and parent.has_command('resolve'):
            primary = _("Mark conflict as resolved?")
            secondary = _(
                "If the conflict was resolved successfully, you may mark "
                "it as resolved now.")
            buttons = ((_("Cancel"), Gtk.ResponseType.CANCEL),
                       (_("Mark _Resolved"), Gtk.ResponseType.OK))
            resolve_response = misc.modal_dialog(
                primary, secondary, buttons, parent=self.widget,
                messagetype=Gtk.MessageType.QUESTION)
            if resolve_response == Gtk.ResponseType.OK:
                conflict_file = self.textbuffer[1].data.filename
                parent.command('resolve', [conflict_file])
    return response
def on_delete_event(self, appquit=0):
    """Handle tab/window close: prompt for unsaved changes first."""
    response = self.check_save_modified()
    if response == Gtk.ResponseType.OK:
        for h in self.settings_handlers:
            meldsettings.disconnect(h)
        # TODO: Base the return code on something meaningful for VC tools
        self.emit('close', 0)
    return response
#
# text buffer undo/redo
#
def on_undo_activate(self):
    """Undo the most recent group of buffer edits, if any."""
    if self.undosequence.can_undo():
        self.undosequence.undo()

def on_redo_activate(self):
    """Redo the most recently undone group of edits, if any."""
    if self.undosequence.can_redo():
        self.undosequence.redo()

def on_textbuffer_begin_user_action(self, *buffer):
    # Group the following edits into a single undo step.
    self.undosequence.begin_group()

def on_textbuffer_end_user_action(self, *buffer):
    self.undosequence.end_group()
def on_text_insert_text(self, buf, it, text, textlen):
    """Record an insertion for undo and mark where it started."""
    text = text_type(text, 'utf8')
    self.undosequence.add_action(
        meldbuffer.BufferInsertionAction(buf, it.get_offset(), text))
    # after_text_insert_text retrieves and deletes this mark.
    buf.create_mark("insertion-start", it, True)

def on_text_delete_range(self, buf, it0, it1):
    """Record a deletion for undo and note how many lines it spans.

    The line count must be taken here, before the text is removed;
    after_text_delete_range consumes deleted_lines_pending.
    """
    text = text_type(buf.get_text(it0, it1, False), 'utf8')
    assert self.deleted_lines_pending == -1
    self.deleted_lines_pending = it1.get_line() - it0.get_line()
    self.undosequence.add_action(
        meldbuffer.BufferDeletionAction(buf, it0.get_offset(), text))

def on_undo_checkpointed(self, undosequence, buf, checkpointed):
    # A checkpointed buffer matches its last saved state.
    self.set_buffer_modified(buf, not checkpointed)
#
#
#
def open_external(self):
    """Open the focused pane's file externally at the cursor line."""
    pane = self._get_focused_pane()
    if pane < 0:
        return
    buf = self.textbuffer[pane]
    if not buf.data.filename:
        return
    cursor_it = buf.get_iter_at_offset(buf.props.cursor_position)
    # External tools expect 1-based line numbers.
    self._open_files([buf.data.filename], cursor_it.get_line() + 1)
def update_text_actions_sensitivity(self, *args):
    """Enable Cut/Copy/Paste based on focus, selection and editability."""
    widget = self.focus_pane
    if not widget:
        cut, copy, paste = False, False, False
    else:
        cut = copy = widget.get_buffer().get_has_selection()
        # Ideally, this would check whether the clipboard included
        # something pasteable. However, there is no changed signal.
        # widget.get_clipboard(
        #     Gdk.SELECTION_CLIPBOARD).wait_is_text_available()
        paste = widget.get_editable()
    if self.main_actiongroup:
        for action, sens in zip(
                ("Cut", "Copy", "Paste"), (cut, copy, paste)):
            self.main_actiongroup.get_action(action).set_sensitive(sens)
def get_selected_text(self):
    """Return the focused pane's selection as unicode, or None."""
    pane = self._get_focused_pane()
    if pane == -1:
        return None
    buf = self.textbuffer[pane]
    bounds = buf.get_selection_bounds()
    if not bounds:
        return None
    start, end = bounds
    return text_type(buf.get_text(start, end, False), 'utf8')
def on_find_activate(self, *args):
    """Open the find bar, pre-filling it with any current selection."""
    selected_text = self.get_selected_text()
    self.findbar.start_find(self.focus_pane, selected_text)
    # Clear modifier state so chunk-action mode isn't left stuck.
    self.keymask = 0

def on_replace_activate(self, *args):
    """Open the find-and-replace bar, pre-filling any selection."""
    selected_text = self.get_selected_text()
    self.findbar.start_replace(self.focus_pane, selected_text)
    self.keymask = 0

def on_find_next_activate(self, *args):
    self.findbar.start_find_next(self.focus_pane)

def on_find_previous_activate(self, *args):
    self.findbar.start_find_previous(self.focus_pane)
def on_scrolledwindow_size_allocate(self, scrolledwindow, allocation):
    """Redraw the linkmap(s) adjacent to the resized scrolled window."""
    index = self.scrolledwindow.index(scrolledwindow)
    # Linkmap i sits between panes i and i + 1.
    for linkmap_idx in (0, 1):
        if index in (linkmap_idx, linkmap_idx + 1):
            self.linkmap[linkmap_idx].queue_draw()
def on_textview_popup_menu(self, textview):
    """Show the context menu on a keyboard popup request (Menu key)."""
    self.popup_menu.popup(None, None, None, None, 0,
                          Gtk.get_current_event_time())
    # True: the event has been handled.
    return True
def on_textview_button_press_event(self, textview, event):
    """Show the context menu on right-click; swallow that event."""
    if event.button != 3:
        return False
    textview.grab_focus()
    self.popup_menu.popup(None, None, None, None, event.button, event.time)
    return True
def on_textview_toggle_overwrite(self, view):
    """Keep overwrite mode in sync across all panes.

    Handlers are disconnected before re-emitting "toggle-overwrite" on
    the sibling views so the propagation doesn't recurse, and then
    reconnected afresh.
    """
    self.textview_overwrite = not self.textview_overwrite
    for v,h in zip(self.textview, self.textview_overwrite_handlers):
        v.disconnect(h)
        if v != view:
            v.emit("toggle-overwrite")
    self.textview_overwrite_handlers = [ t.connect("toggle-overwrite", self.on_textview_toggle_overwrite) for t in self.textview ]
    self.on_cursor_position_changed(view.get_buffer(), None, True)
#
# text buffer loading/saving
#
def set_labels(self, labels):
    """Override buffer display labels.

    Falsy entries keep the existing label; labels beyond the number of
    buffers are ignored (zip truncates to the shorter sequence).
    """
    for label, buf in zip(labels, self.textbuffer):
        if label:
            buf.data.label = label
def set_merge_output_file(self, filename):
    """Point the middle pane's save target at filename (merge output)."""
    if len(self.textbuffer) < 2:
        return
    buf = self.textbuffer[1]
    buf.data.savefile = os.path.abspath(filename)
    buf.data.label = filename
    # Writability check only applies to an existing file; a new file is
    # assumed writable.
    writable = True
    if os.path.exists(buf.data.savefile):
        writable = os.access(buf.data.savefile, os.W_OK)
    self.set_buffer_writable(buf, writable)
    self.fileentry[1].set_filename(buf.data.savefile)
    self.recompute_label()
def _set_save_action_sensitivity(self):
    """Enable Save for a modified focused pane; SaveAll when any
    buffer is modified."""
    pane = self._get_focused_pane()
    modified = pane != -1 and self.textbuffer[pane].data.modified
    if self.main_actiongroup:
        self.main_actiongroup.get_action("Save").set_sensitive(modified)
    any_modified = any(b.data.modified for b in self.textbuffer)
    self.actiongroup.get_action("SaveAll").set_sensitive(any_modified)
def recompute_label(self):
    """Rebuild the tab label, tooltip and save-button states."""
    self._set_save_action_sensitivity()
    filenames = [b.data.label for b in self.textbuffer[:self.num_panes]]
    shortnames = misc.shorten_names(*filenames)
    for i, buf in enumerate(self.textbuffer[:self.num_panes]):
        if buf.data.modified:
            # Trailing asterisk marks unsaved changes.
            shortnames[i] += "*"
        self.file_save_button[i].set_sensitive(buf.data.modified)
        self.file_save_button[i].props.stock_id = (
            Gtk.STOCK_SAVE if buf.data.writable else Gtk.STOCK_SAVE_AS)
    label = self.meta.get("tablabel", "")
    if label:
        self.label_text = label
    else:
        # Python 2: decode the em-dash separator to unicode for joining.
        self.label_text = (" — ").decode('utf8').join(shortnames)
    self.tooltip_text = self.label_text
    self.label_changed()
def set_files(self, files):
    """Set num panes to len(files) and load each file given.
    If an element is None, the text of a pane is left as is.
    """
    self._disconnect_buffer_handlers()
    files = list(files)
    for i, f in enumerate(files):
        if not f:
            continue
        # Python 2: normalise byte-string paths to unicode.
        if not isinstance(f, unicode):
            files[i] = f = f.decode('utf8')
        absfile = os.path.abspath(f)
        self.fileentry[i].set_filename(absfile)
        self.textbuffer[i].reset_buffer(absfile)
        self.msgarea_mgr[i].clear()
    self.recompute_label()
    # Bool-as-index: focus the middle pane (1) for 2+ files, else pane 0.
    self.textview[len(files) >= 2].grab_focus()
    self._connect_buffer_handlers()
    self.scheduler.add_task(self._set_files_internal(files))
def get_comparison(self):
    """Describe this comparison for the recent-comparisons machinery."""
    filenames = [buf.data.filename
                 for buf in self.textbuffer[:self.num_panes]]
    return recent.TYPE_FILE, filenames
def _load_files(self, files, textbuffers):
    """Generator: load each file into its buffer, yielding progress.

    Each configured encoding is tried in turn ('latin1' appended as a
    catch-all), content containing NUL bytes is treated as binary, and
    problems are reported via dismissable message areas.
    """
    self.undosequence.clear()
    yield _("[%s] Set num panes") % self.label_text
    self.set_num_panes( len(files) )
    self._disconnect_buffer_handlers()
    self.linediffer.clear()
    self.queue_draw()
    try_codecs = list(settings.get_value('detect-encodings'))
    try_codecs.append('latin1')
    yield _("[%s] Opening files") % self.label_text
    tasks = []

    def add_dismissable_msg(pane, icon, primary, secondary):
        # Message area with a single "Hide" button that clears itself.
        msgarea = self.msgarea_mgr[pane].new_from_text_and_icon(
            icon, primary, secondary)
        msgarea.add_button(_("Hi_de"), Gtk.ResponseType.CLOSE)
        msgarea.connect("response",
                        lambda *args: self.msgarea_mgr[pane].clear())
        msgarea.show_all()
        return msgarea

    for pane, filename in enumerate(files):
        buf = textbuffers[pane]
        if filename:
            try:
                handle = io.open(filename, "r", encoding=try_codecs[0])
                task = TaskEntry(filename, handle, buf, try_codecs[:],
                                 pane, False)
                tasks.append(task)
            except (IOError, LookupError) as e:
                buf.delete(*buf.get_bounds())
                add_dismissable_msg(pane, Gtk.STOCK_DIALOG_ERROR,
                                    _("Could not read file"), str(e))
    yield _("[%s] Reading files") % self.label_text
    # Round-robin over the open files, 4 KB at a time.
    while len(tasks):
        for t in tasks[:]:
            try:
                nextbit = t.file.read(4096)
                # A NUL byte is taken to mean binary content.
                if nextbit.find("\x00") != -1:
                    t.buf.delete(*t.buf.get_bounds())
                    filename = GObject.markup_escape_text(t.filename)
                    add_dismissable_msg(t.pane, Gtk.STOCK_DIALOG_ERROR,
                                        _("Could not read file"),
                                        _("%s appears to be a binary file.") % filename)
                    tasks.remove(t)
                    continue
            except ValueError as err:
                # Decode failure: restart the file with the next codec,
                # or give up when none remain.
                t.codec.pop(0)
                if len(t.codec):
                    t.buf.delete(*t.buf.get_bounds())
                    t.file = io.open(t.filename, "r", encoding=t.codec[0])
                else:
                    t.buf.delete(*t.buf.get_bounds())
                    filename = GObject.markup_escape_text(t.filename)
                    add_dismissable_msg(t.pane, Gtk.STOCK_DIALOG_ERROR,
                                        _("Could not read file"),
                                        _("%s is not in encodings: %s") %
                                        (filename, try_codecs))
                    tasks.remove(t)
            except IOError as ioerr:
                add_dismissable_msg(t.pane, Gtk.STOCK_DIALOG_ERROR,
                                    _("Could not read file"), str(ioerr))
                tasks.remove(t)
            else:
                # The handling here avoids inserting split CR/LF pairs into
                # GtkTextBuffers; this is relevant only when universal
                # newline support is unavailable or broken.
                if t.was_cr:
                    nextbit = "\r" + nextbit
                    t.was_cr = False
                if len(nextbit):
                    if nextbit[-1] == "\r" and len(nextbit) > 1:
                        t.was_cr = True
                        nextbit = nextbit[0:-1]
                    t.buf.insert(t.buf.get_end_iter(), nextbit)
                else:
                    # EOF: record writability, encoding and newline style.
                    if t.buf.data.savefile:
                        writable = True
                        if os.path.exists(t.buf.data.savefile):
                            writable = os.access(
                                t.buf.data.savefile, os.W_OK)
                    else:
                        writable = os.access(t.filename, os.W_OK)
                    self.set_buffer_writable(t.buf, writable)
                    t.buf.data.encoding = t.codec[0]
                    if hasattr(t.file, "newlines"):
                        t.buf.data.newlines = t.file.newlines
                    tasks.remove(t)
        yield 1
    for b in self.textbuffer:
        self.undosequence.checkpoint(b)
        b.data.update_mtime()
def _diff_files(self, refresh=False):
    """Generator: (re)compute diffs and set cursor and syntax state.

    With refresh=True, the initial cursor placement is skipped (used
    when redoing a comparison in place).
    """
    yield _("[%s] Computing differences") % self.label_text
    texts = self.buffer_filtered[:self.num_panes]
    self.linediffer.ignore_blanks = self.props.ignore_blank_lines
    # set_sequences_iter is incremental; keep yielding until it settles.
    step = self.linediffer.set_sequences_iter(texts)
    while next(step) is None:
        yield 1

    if not refresh:
        chunk, prev, next_ = self.linediffer.locate_chunk(1, 0)
        self.cursor.next = chunk
        if self.cursor.next is None:
            self.cursor.next = next_
        for buf in self.textbuffer:
            buf.place_cursor(buf.get_start_iter())
        if self.cursor.next is not None:
            # Jump to the first chunk once the UI settles.
            self.scheduler.add_task(
                lambda: self.next_diff(Gdk.ScrollDirection.DOWN, True), True)
        else:
            buf = self.textbuffer[1 if self.num_panes > 1 else 0]
            self.on_cursor_position_changed(buf, None, True)

    self.queue_draw()
    self._connect_buffer_handlers()
    self._set_merge_action_sensitivity()

    langs = []
    for i in range(self.num_panes):
        filename = self.textbuffer[i].data.filename
        if filename:
            langs.append(LanguageManager.get_language_from_file(filename))
        else:
            langs.append(None)
    # If we have only one identified language then we assume that all of
    # the files are actually of that type.
    real_langs = [l for l in langs if l]
    if real_langs and real_langs.count(real_langs[0]) == len(real_langs):
        langs = (real_langs[0],) * len(langs)
    for i in range(self.num_panes):
        self.textbuffer[i].set_language(langs[i])
def _set_files_internal(self, files):
    """Scheduler task: load the given files, then diff them."""
    for progress in self._load_files(files, self.textbuffer):
        yield progress
    for progress in self._diff_files():
        yield progress
def set_meta(self, meta):
    """Store comparison metadata and apply any pane labels it carries."""
    self.meta = meta
    for pane, label in enumerate(meta.get('labels', ())):
        if label:
            # A fixed label replaces the editable file entry.
            self.filelabel[pane].set_text(label)
            self.filelabel_toolitem[pane].set_visible(True)
            self.fileentry_toolitem[pane].set_visible(False)
def notify_file_changed(self, data):
    """Show a reload prompt when a buffer's file changed on disk."""
    try:
        pane = [b.data for b in self.textbuffer].index(data)
    except ValueError:
        # Notification for unknown buffer
        return

    gfile = Gio.File.new_for_path(data.filename)
    primary = _("File %s has changed on disk") % gfile.get_parse_name()
    secondary = _("Do you want to reload the file?")
    msgarea = self.msgarea_mgr[pane].new_from_text_and_icon(
        Gtk.STOCK_DIALOG_WARNING, primary, secondary)
    msgarea.add_button(_("_Reload"), Gtk.ResponseType.ACCEPT)
    msgarea.add_button(_("Hi_de"), Gtk.ResponseType.CLOSE)

    def on_file_changed_response(msgarea, response_id, *args):
        # Either way the prompt is dismissed; Reload reverts the file.
        self.msgarea_mgr[pane].clear()
        if response_id == Gtk.ResponseType.ACCEPT:
            self.on_revert_activate()

    msgarea.connect("response", on_file_changed_response)
    msgarea.show_all()
def refresh_comparison(self, *args):
    """Refresh the view by clearing and redoing all comparisons."""
    self._disconnect_buffer_handlers()
    self.linediffer.clear()
    # Drop inline-highlight tags; the rediff recreates them.
    for buf in self.textbuffer:
        inline_tag = buf.get_tag_table().lookup("inline")
        buf.remove_tag(inline_tag, buf.get_start_iter(), buf.get_end_iter())
    self.queue_draw()
    self.scheduler.add_task(self._diff_files(refresh=True))
def _set_merge_action_sensitivity(self):
    """Enable the bulk merge actions based on focus, editability and
    whether mergeable (non-conflicting) changes exist."""
    pane = self._get_focused_pane()
    if pane != -1:
        editable = self.textview[pane].get_editable()
        mergeable = self.linediffer.has_mergeable_changes(pane)
    else:
        editable = False
        mergeable = (False, False)
    self.actiongroup.get_action("MergeFromLeft").set_sensitive(mergeable[0] and editable)
    self.actiongroup.get_action("MergeFromRight").set_sensitive(mergeable[1] and editable)
    # MergeAll targets the middle pane of a three-way comparison.
    if self.num_panes == 3 and self.textview[1].get_editable():
        mergeable = self.linediffer.has_mergeable_changes(1)
    else:
        mergeable = (False, False)
    self.actiongroup.get_action("MergeAll").set_sensitive(mergeable[0] or mergeable[1])
def on_diffs_changed(self, linediffer, chunk_changes):
    """React to a change in the computed diff chunks.

    Clears inline highlighting for removed/modified chunks, asynchronously
    recomputes inline (character-level) highlighting for added/modified
    chunks, and updates the "files are identical" message bars.
    """
    removed_chunks, added_chunks, modified_chunks = chunk_changes
    # We need to clear removed and modified chunks, and need to
    # re-highlight added and modified chunks.
    need_clearing = sorted(list(removed_chunks))
    # NOTE(review): modified_chunks is wrapped as a single element here and
    # compared whole against each chunk below, so it appears to be one
    # chunk tuple, not a collection -- confirm against the differ's API.
    need_highlighting = sorted(list(added_chunks) + [modified_chunks])
    alltags = [b.get_tag_table().lookup("inline") for b in self.textbuffer]
    for chunk in need_clearing:
        for i, c in enumerate(chunk):
            if not c or c[0] != "replace":
                continue
            # i == 1 pairs the middle buffer with the right-hand buffer.
            to_idx = 2 if i == 1 else 0
            bufs = self.textbuffer[1], self.textbuffer[to_idx]
            tags = alltags[1], alltags[to_idx]
            starts = [b.get_iter_at_line_or_eof(l) for b, l in
                      zip(bufs, (c[1], c[3]))]
            ends = [b.get_iter_at_line_or_eof(l) for b, l in
                    zip(bufs, (c[2], c[4]))]
            bufs[0].remove_tag(tags[0], starts[0], ends[0])
            bufs[1].remove_tag(tags[1], starts[1], ends[1])
    for chunk in need_highlighting:
        clear = chunk == modified_chunks
        for i, c in enumerate(chunk):
            if not c or c[0] != "replace":
                continue
            to_idx = 2 if i == 1 else 0
            bufs = self.textbuffer[1], self.textbuffer[to_idx]
            tags = alltags[1], alltags[to_idx]
            starts = [b.get_iter_at_line_or_eof(l) for b, l in
                      zip(bufs, (c[1], c[3]))]
            ends = [b.get_iter_at_line_or_eof(l) for b, l in
                    zip(bufs, (c[2], c[4]))]
            # We don't use self.buffer_texts here, as removing line
            # breaks messes with inline highlighting in CRLF cases
            text1 = bufs[0].get_text(starts[0], ends[0], False)
            text1 = text_type(text1, 'utf8')
            textn = bufs[1].get_text(starts[1], ends[1], False)
            textn = text_type(textn, 'utf8')
            # Bail on long sequences, rather than try a slow comparison
            inline_limit = 10000
            if len(text1) + len(textn) > inline_limit and \
                    not self.force_highlight:
                # Too long for inline matching: paint the whole span and
                # offer to force a full highlight instead.
                for i in range(2):
                    bufs[i].apply_tag(tags[i], starts[i], ends[i])
                self._prompt_long_highlighting()
                continue
            def apply_highlight(bufs, tags, start_marks, end_marks, texts, matches):
                # Callback run when the (possibly cached/async) matcher
                # finishes; re-resolves iters from marks because buffer
                # contents may have changed in the meantime.
                starts = [bufs[0].get_iter_at_mark(start_marks[0]),
                          bufs[1].get_iter_at_mark(start_marks[1])]
                ends = [bufs[0].get_iter_at_mark(end_marks[0]),
                        bufs[1].get_iter_at_mark(end_marks[1])]
                text1 = bufs[0].get_text(starts[0], ends[0], False)
                text1 = text_type(text1, 'utf8')
                textn = bufs[1].get_text(starts[1], ends[1], False)
                textn = text_type(textn, 'utf8')
                # Marks are one-shot; free them before any early return.
                bufs[0].delete_mark(start_marks[0])
                bufs[0].delete_mark(end_marks[0])
                bufs[1].delete_mark(start_marks[1])
                bufs[1].delete_mark(end_marks[1])
                if texts != (text1, textn):
                    # Buffer changed since the match was requested; the
                    # result is stale, so drop it.
                    return
                offsets = [ends[0].get_offset() - starts[0].get_offset(),
                           ends[1].get_offset() - starts[1].get_offset()]
                def process_matches(match):
                    if match.tag != "equal":
                        return True
                    # Always keep matches occurring at the start or end
                    start_or_end = (
                        (match.start_a == 0 and match.start_b == 0) or
                        (match.end_a == offsets[0] and match.end_b == offsets[1]))
                    if start_or_end:
                        return False
                    # Remove equal matches of size less than 3
                    too_short = ((match.end_a - match.start_a < 3) or
                                 (match.end_b - match.start_b < 3))
                    return too_short
                matches = [m for m in matches if process_matches(m)]
                for i in range(2):
                    start, end = starts[i].copy(), starts[i].copy()
                    offset = start.get_offset()
                    for o in matches:
                        start.set_offset(offset + o[1 + 2 * i])
                        end.set_offset(offset + o[2 + 2 * i])
                        bufs[i].apply_tag(tags[i], start, end)
            if clear:
                # Modified chunk: wipe stale inline tags before re-matching.
                bufs[0].remove_tag(tags[0], starts[0], ends[0])
                bufs[1].remove_tag(tags[1], starts[1], ends[1])
            # Anchor the span with marks so the async callback survives edits.
            starts = [bufs[0].create_mark(None, starts[0], True),
                      bufs[1].create_mark(None, starts[1], True)]
            ends = [bufs[0].create_mark(None, ends[0], True),
                    bufs[1].create_mark(None, ends[1], True)]
            match_cb = functools.partial(apply_highlight, bufs, tags,
                                         starts, ends, (text1, textn))
            self._cached_match.match(text1, textn, match_cb)
    self._cached_match.clean(self.linediffer.diff_count())
    self._set_merge_action_sensitivity()
    if self.linediffer.sequences_identical():
        error_message = True in [m.has_message() for m in self.msgarea_mgr]
        if self.num_panes == 1 or error_message:
            return
        for index, mgr in enumerate(self.msgarea_mgr):
            secondary_text = None
            # TODO: Currently this only checks to see whether text filters
            # are active, and may be altering the comparison. It would be
            # better if we only showed this message if the filters *did*
            # change the text in question.
            active_filters = any([f.active for f in self.text_filters])
            if active_filters:
                secondary_text = _("Text filters are being used, and may "
                                   "be masking differences between files. "
                                   "Would you like to compare the "
                                   "unfiltered files?")
            msgarea = mgr.new_from_text_and_icon(Gtk.STOCK_INFO,
                                                 _("Files are identical"),
                                                 secondary_text)
            mgr.set_msg_id(FileDiff.MSG_SAME)
            button = msgarea.add_button(_("Hide"), Gtk.ResponseType.CLOSE)
            if index == 0:
                # Only the first bar's button gets a mnemonic.
                button.props.label = _("Hi_de")
            if active_filters:
                msgarea.add_button(_("Show without filters"),
                                   Gtk.ResponseType.OK)
            msgarea.connect("response", self.on_msgarea_identical_response)
            msgarea.show_all()
    else:
        # Files differ (again): retract any stale "identical" bars.
        for m in self.msgarea_mgr:
            if m.get_msg_id() == FileDiff.MSG_SAME:
                m.clear()
def _prompt_long_highlighting(self):
    """Show message bars offering to force slow inline highlighting.

    Shown when a chunk exceeded the inline-highlight size limit; choosing
    "Keep highlighting" sets force_highlight and redoes the comparison.
    """
    def on_msgarea_highlighting_response(msgarea, respid):
        for mgr in self.msgarea_mgr:
            mgr.clear()
        if respid == Gtk.ResponseType.OK:
            self.force_highlight = True
            self.refresh_comparison()
    for index, mgr in enumerate(self.msgarea_mgr):
        msgarea = mgr.new_from_text_and_icon(
            Gtk.STOCK_INFO,
            _("Change highlighting incomplete"),
            _("Some changes were not highlighted because they were too "
              "large. You can force Meld to take longer to highlight "
              "larger changes, though this may be slow."))
        mgr.set_msg_id(FileDiff.MSG_SLOW_HIGHLIGHT)
        button = msgarea.add_button(_("Hi_de"), Gtk.ResponseType.CLOSE)
        if index == 0:
            # NOTE(review): this override re-assigns the same "Hi_de" label
            # the button already has; compare the "Hide"/"Hi_de" pattern in
            # on_diffs_changed -- the default here was probably meant to be
            # the mnemonic-free _("Hide").
            button.props.label = _("Hi_de")
        button = msgarea.add_button(
            _("Keep highlighting"), Gtk.ResponseType.OK)
        if index == 0:
            # Only the first bar's button carries the mnemonic.
            button.props.label = _("_Keep highlighting")
        msgarea.connect("response",
                        on_msgarea_highlighting_response)
        msgarea.show_all()
def on_msgarea_identical_response(self, msgarea, respid):
    """Dismiss the "files are identical" bars in every pane.

    If the user chose "Show without filters" (OK), drop all text filters
    and redo the comparison on the raw file contents.
    """
    for manager in self.msgarea_mgr:
        manager.clear()
    if respid != Gtk.ResponseType.OK:
        return
    self.text_filters = []
    self.refresh_comparison()
def on_textview_draw(self, textview, context):
    """Cairo draw handler painting chunk backgrounds, the current-line
    highlight, syncpoint markers and chunk insert/delete animations.
    """
    if self.num_panes == 1:
        return
    # FIXME: Update to use gtk_cairo_should_draw_window()
    # if event.window != textview.get_window(Gtk.TextWindowType.TEXT) \
    #         and event.window != textview.get_window(Gtk.TextWindowType.LEFT):
    #     return
    # # Hack to redraw the line number gutter used by post-2.10 GtkSourceView
    # if event.window == textview.get_window(Gtk.TextWindowType.LEFT) and \
    #         self.in_nested_textview_gutter_expose:
    #     self.in_nested_textview_gutter_expose = False
    #     return
    visible = textview.get_visible_rect()
    pane = self.textview.index(textview)
    textbuffer = textview.get_buffer()
    x, y = textview.window_to_buffer_coords(Gtk.TextWindowType.WIDGET,
                                            0, 0)
    view_allocation = textview.get_allocation()
    # Only lines within (roughly) the visible vertical range are painted.
    bounds = (textview.get_line_num_for_y(y),
              textview.get_line_num_for_y(y + view_allocation.height + 1))
    width, height = view_allocation.width, view_allocation.height
    context.set_line_width(1.0)
    # Paint each visible chunk: fill, optional current-chunk overlay, outline.
    for change in self.linediffer.single_changes(pane, bounds):
        ypos0 = textview.get_y_for_line_num(change[1]) - visible.y
        ypos1 = textview.get_y_for_line_num(change[2]) - visible.y
        context.rectangle(-0.5, ypos0 - 0.5, width + 1, ypos1 - ypos0)
        if change[1] != change[2]:
            context.set_source_rgba(*self.fill_colors[change[0]])
            context.fill_preserve()
            if self.linediffer.locate_chunk(pane, change[1])[0] == self.cursor.chunk:
                h = self.fill_colors['current-chunk-highlight']
                context.set_source_rgba(h.red, h.green, h.blue, 0.5)
                context.fill_preserve()
        context.set_source_rgba(*self.line_colors[change[0]])
        context.stroke()
    # Current-line highlight, only in the focused pane.
    if (self.props.highlight_current_line and textview.is_focus() and
            self.cursor.line is not None):
        it = textbuffer.get_iter_at_line(self.cursor.line)
        ypos, line_height = textview.get_line_yrange(it)
        context.save()
        context.rectangle(0, ypos - visible.y, width, line_height)
        context.clip()
        context.set_source_rgba(*self.highlight_color)
        context.paint_with_alpha(0.25)
        context.restore()
    # One-pixel-high lines marking this pane's synchronization points.
    for syncpoint in [p[pane] for p in self.syncpoints]:
        if not syncpoint:
            continue
        syncline = textbuffer.get_iter_at_mark(syncpoint).get_line()
        if bounds[0] <= syncline <= bounds[1]:
            ypos = textview.get_y_for_line_num(syncline) - visible.y
            context.rectangle(-0.5, ypos - 0.5, width + 1, 1)
            context.set_source_rgba(*self.syncpoint_color)
            context.stroke()
    # Fade-out animations for recently copied/deleted chunks; chunks whose
    # duration has elapsed get their marks deleted and are dropped.
    new_anim_chunks = []
    for c in self.animating_chunks[pane]:
        current_time = GLib.get_monotonic_time()
        percent = min(1.0, (current_time - c.start_time) / float(c.duration))
        rgba_pairs = zip(c.start_rgba, c.end_rgba)
        rgba = [s + (e - s) * percent for s, e in rgba_pairs]
        it = textbuffer.get_iter_at_mark(c.start_mark)
        ystart, _ = textview.get_line_yrange(it)
        it = textbuffer.get_iter_at_mark(c.end_mark)
        yend, _ = textview.get_line_yrange(it)
        if ystart == yend:
            # Zero-height (pure insert/delete) chunk: give it 1px so it shows.
            ystart -= 1
        context.set_source_rgba(*rgba)
        context.rectangle(0, ystart - visible.y, width, yend - ystart)
        context.fill()
        if current_time <= c.start_time + c.duration:
            new_anim_chunks.append(c)
        else:
            textbuffer.delete_mark(c.start_mark)
            textbuffer.delete_mark(c.end_mark)
    self.animating_chunks[pane] = new_anim_chunks
    if self.animating_chunks[pane] and self.anim_source_id[pane] is None:
        def anim_cb():
            textview.queue_draw()
            return True
        # Using timeout_add interferes with recalculation of inline
        # highlighting; this mechanism could be improved.
        self.anim_source_id[pane] = GLib.idle_add(anim_cb)
    elif not self.animating_chunks[pane] and self.anim_source_id[pane]:
        # Animations finished: stop the idle-driven redraw loop.
        GLib.source_remove(self.anim_source_id[pane])
        self.anim_source_id[pane] = None
    # if event.window == textview.get_window(Gtk.TextWindowType.LEFT):
    #     self.in_nested_textview_gutter_expose = True
    #     textview.emit("expose-event", event)
def _get_filename_for_saving(self, title ):
    """Run a save-file chooser titled *title*.

    Returns the chosen path, or None if the user cancelled or declined
    to overwrite an existing file.
    """
    dialog = Gtk.FileChooserDialog(title,
        parent=self.widget.get_toplevel(),
        action=Gtk.FileChooserAction.SAVE,
        buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OK, Gtk.ResponseType.OK) )
    dialog.set_default_response(Gtk.ResponseType.OK)
    response = dialog.run()
    filename = None
    if response == Gtk.ResponseType.OK:
        filename = dialog.get_filename()
    dialog.destroy()
    if filename:
        if os.path.exists(filename):
            # Confirm before clobbering an existing file.
            response = misc.run_dialog(
                _('"%s" exists!\nOverwrite?') % os.path.basename(filename),
                parent = self,
                buttonstype = Gtk.ButtonsType.YES_NO)
            if response == Gtk.ResponseType.NO:
                return None
        return filename
    return None
def _save_text_to_filename(self, filename, text):
    """Write the byte string *text* to *filename*.

    Returns True on success; on IOError an error dialog is shown and
    False is returned.
    """
    try:
        # Use a context manager so the file is closed (and its buffers
        # flushed) deterministically; the original `open(...).write(...)`
        # leaked the file object until garbage collection.
        with open(filename, "wb") as fileobj:
            fileobj.write(text)
    except IOError as e:
        misc.run_dialog(
            _("Error writing to %s\n\n%s.") % (filename, e),
            self, Gtk.MessageType.ERROR, Gtk.ButtonsType.OK)
        return False
    return True
def save_file(self, pane, saveas=False, force_overwrite=False):
    """Save the buffer in *pane* to disk.

    Prompts for a filename when needed (Save As, no filename yet, or not
    writable), warns if the file changed on disk since opening (unless
    *force_overwrite*), resolves mixed newline styles, and re-encodes the
    text. Returns True on success, False on failure/cancel, and None when
    deferring to the "changed on disk" message bar.
    """
    buf = self.textbuffer[pane]
    bufdata = buf.data
    if saveas or not (bufdata.filename or bufdata.savefile) \
            or not bufdata.writable:
        if pane == 0:
            prompt = _("Save Left Pane As")
        elif pane == 1 and self.num_panes == 3:
            prompt = _("Save Middle Pane As")
        else:
            prompt = _("Save Right Pane As")
        filename = self._get_filename_for_saving(prompt)
        if filename:
            bufdata.filename = bufdata.label = os.path.abspath(filename)
            bufdata.savefile = None
            self.fileentry[pane].set_filename(bufdata.filename)
            self.filelabel_toolitem[pane].set_visible(False)
            self.fileentry_toolitem[pane].set_visible(True)
        else:
            return False
    if not force_overwrite and not bufdata.current_on_disk():
        # File changed externally: ask before overwriting; the response
        # handler re-enters save_file with force_overwrite=True.
        gfile = Gio.File.new_for_path(bufdata.filename)
        primary = _("File %s has changed on disk since it was opened") % \
            gfile.get_parse_name()
        secondary = _("If you save it, any external changes will be lost.")
        msgarea = self.msgarea_mgr[pane].new_from_text_and_icon(
            Gtk.STOCK_DIALOG_WARNING, primary, secondary)
        msgarea.add_button(_("Save Anyway"), Gtk.ResponseType.ACCEPT)
        msgarea.add_button(_("Don't Save"), Gtk.ResponseType.CLOSE)
        def on_file_changed_response(msgarea, response_id, *args):
            self.msgarea_mgr[pane].clear()
            if response_id == Gtk.ResponseType.ACCEPT:
                self.save_file(pane, saveas, force_overwrite=True)
        msgarea.connect("response", on_file_changed_response)
        msgarea.show_all()
        return
    start, end = buf.get_bounds()
    text = text_type(buf.get_text(start, end, False), 'utf8')
    if bufdata.newlines:
        # basestring: this module targets Python 2. A string means a single
        # known newline style; a collection means mixed line endings.
        if isinstance(bufdata.newlines, basestring):
            if bufdata.newlines != '\n':
                text = text.replace("\n", bufdata.newlines)
        else:
            buttons = {
                '\n': ("UNIX (LF)", 0),
                '\r\n': ("DOS/Windows (CR-LF)", 1),
                '\r': ("Mac OS (CR)", 2),
            }
            newline = misc.run_dialog( _("This file '%s' contains a mixture of line endings.\n\nWhich format would you like to use?") % bufdata.label,
                self, Gtk.MessageType.WARNING, buttonstype=Gtk.ButtonsType.CANCEL,
                extrabuttons=[ buttons[b] for b in bufdata.newlines ] )
            if newline < 0:
                # Cancelled the newline-style dialog.
                return
            for k,v in buttons.items():
                if v[1] == newline:
                    bufdata.newlines = k
                    if k != '\n':
                        text = text.replace('\n', k)
                    break
    if bufdata.encoding:
        try:
            text = text.encode(bufdata.encoding)
        except UnicodeEncodeError:
            # Offer UTF-8 as a fallback; declining aborts the save.
            if misc.run_dialog(
                _("'%s' contains characters not encodable with '%s'\nWould you like to save as UTF-8?") % (bufdata.label, bufdata.encoding),
                self, Gtk.MessageType.ERROR, Gtk.ButtonsType.YES_NO) != Gtk.ResponseType.YES:
                return False
    save_to = bufdata.savefile or bufdata.filename
    if self._save_text_to_filename(save_to, text):
        self.emit("file-changed", save_to)
        self.undosequence.checkpoint(buf)
        bufdata.update_mtime()
        if pane == 1 and self.num_panes == 3:
            self.meta['middle_saved'] = True
        return True
    else:
        return False
def make_patch(self, *extra):
    """Open the modal patch-creation dialog for the current comparison."""
    patchdialog.PatchDialog(self).run()
def set_buffer_writable(self, buf, writable):
    """Record whether *buf* can be written back to disk and update the UI."""
    buf.data.writable = writable
    self.recompute_label()
    pane = self.textbuffer.index(buf)
    # The read-only toggle is only shown for buffers we cannot write back.
    self.readonlytoggle[pane].props.visible = not writable
    self.set_buffer_editable(buf, writable)
def set_buffer_modified(self, buf, yesno):
    # Record the modification flag on the buffer's data and refresh the
    # label, which reflects unsaved-changes state.
    buf.data.modified = yesno
    self.recompute_label()
def set_buffer_editable(self, buf, editable):
    """Toggle in-UI editability of *buf* and repaint dependent widgets."""
    buf.data.editable = editable
    pane = self.textbuffer.index(buf)
    # Keep the toggle button in sync (it shows the *read-only* state).
    self.readonlytoggle[pane].set_active(not editable)
    self.textview[pane].set_editable(editable)
    self.on_cursor_position_changed(buf, None, True)
    for linkmap in self.linkmap:
        linkmap.queue_draw()
def save(self):
    """Save the focused pane, if any pane currently has focus."""
    pane = self._get_focused_pane()
    if pane < 0:
        return
    self.save_file(pane)
def save_as(self):
    """Save the focused pane under a new name, if any pane has focus."""
    pane = self._get_focused_pane()
    if pane < 0:
        return
    self.save_file(pane, True)
def on_save_all_activate(self, action):
    """Save every pane whose buffer holds unsaved modifications."""
    for pane in range(self.num_panes):
        if not self.textbuffer[pane].data.modified:
            continue
        self.save_file(pane)
def on_file_save_button_clicked(self, button):
    # Map the clicked per-pane save button to its pane index and save it.
    idx = self.file_save_button.index(button)
    self.save_file(idx)
def on_fileentry_file_set(self, entry):
    """Reload the comparison when the user picks a file in any chooser.

    Gives the user a chance to save modified buffers first; a Cancel
    response leaves the comparison untouched.
    """
    if self.check_save_modified() == Gtk.ResponseType.CANCEL:
        return True
    paths = [
        chooser.get_file().get_path()
        for chooser in self.fileentry[:self.num_panes]
    ]
    self.set_files(paths)
    return True
def _get_focused_pane(self):
    """Return the index of the focused textview, or -1 if none has focus."""
    for pane, view in enumerate(self.textview[:self.num_panes]):
        if view.is_focus():
            return pane
    return -1
def on_revert_activate(self, *extra):
    """Reload all compared files from disk.

    If any buffer has unsaved changes, first confirms via the revert
    dialog, listing the affected files.
    """
    response = Gtk.ResponseType.OK
    unsaved = [b.data.label for b in self.textbuffer if b.data.modified]
    if unsaved:
        dialog = gnomeglade.Component("filediff.ui", "revert_dialog")
        dialog.widget.set_transient_for(self.widget.get_toplevel())
        # FIXME: Should be packed into dialog.widget.get_message_area(),
        # but this is unbound on currently required PyGTK.
        filelist = "\n".join(["\t" + f for f in unsaved])
        dialog.widget.props.secondary_text += filelist
        response = dialog.widget.run()
        dialog.widget.destroy()
    if response == Gtk.ResponseType.OK:
        files = [b.data.filename for b in self.textbuffer[:self.num_panes]]
        self.set_files(files)
def on_refresh_activate(self, *extra):
    # Menu/toolbar "Refresh" action: redo the whole comparison.
    self.refresh_comparison()
def queue_draw(self, junk=None):
    """Queue a redraw of every comparison widget: views, link maps, diffmaps."""
    for view in self.textview:
        view.queue_draw()
    for linkmap in self.linkmap[:self.num_panes - 1]:
        linkmap.queue_draw()
    self.diffmap0.queue_draw()
    self.diffmap1.queue_draw()
def on_action_lock_scrolling_toggled(self, action):
    # Keep the internal scroll-lock state in sync with the toggle action.
    self.toggle_scroll_lock(action.get_active())
def toggle_scroll_lock(self, locked):
    # Note the inversion: _scroll_lock True means panes scroll
    # *independently* (the "LockScrolling" action is off).
    self.actiongroup.get_action("LockScrolling").set_active(locked)
    self._scroll_lock = not locked
def on_readonly_button_toggled(self, button):
    """Flip editability of the pane whose read-only toggle was clicked."""
    pane = self.readonlytoggle.index(button)
    # Toggle active == read-only, hence the negation.
    self.set_buffer_editable(self.textbuffer[pane], not button.get_active())
#
# scrollbars
#
def _sync_hscroll(self, adjustment):
    """Mirror a horizontal scroll in one pane to all other panes."""
    # Re-entrancy guard: setting the other adjustments below re-fires this
    # handler; _sync_hscroll_lock suppresses the recursion. _scroll_lock
    # means the panes scroll independently.
    if self._sync_hscroll_lock or self._scroll_lock:
        return
    self._sync_hscroll_lock = True
    val = adjustment.get_value()
    for sw in self.scrolledwindow[:self.num_panes]:
        adj = sw.get_hadjustment()
        if adj is not adjustment:
            adj.set_value(val)
    self._sync_hscroll_lock = False
def _sync_vscroll(self, adjustment, master):
    """Synchronize vertical scrolling of the other panes to pane *master*.

    Maps the line at the master pane's vertical midpoint through the
    chunk correspondences to an equivalent line in each other pane, then
    scrolls that pane so the two lines align.
    """
    # only allow one scrollbar to be here at a time
    if self._sync_vscroll_lock:
        return
    # Holding Shift (MASK_SHIFT) temporarily detaches the panes.
    if not self._scroll_lock and (self.keymask & MASK_SHIFT) == 0:
        self._sync_vscroll_lock = True
        syncpoint = 0.5
        # the line to search for in the 'master' text
        master_y = (adjustment.get_value() + adjustment.get_page_size() *
                    syncpoint)
        it = self.textview[master].get_line_at_y(int(master_y))[0]
        line_y, height = self.textview[master].get_line_yrange(it)
        # Fractional line position, for smooth sub-line alignment.
        line = it.get_line() + ((master_y-line_y)/height)
        # scrollbar influence 0->1->2 or 0<-1->2 or 0<-1<-2
        scrollbar_influence = ((1, 2), (0, 2), (1, 0))
        for i in scrollbar_influence[master][:self.num_panes - 1]:
            adj = self.scrolledwindow[i].get_vadjustment()
            mbegin, mend = 0, self.textbuffer[master].get_line_count()
            obegin, oend = 0, self.textbuffer[i].get_line_count()
            # look for the chunk containing 'line'
            for c in self.linediffer.pair_changes(master, i):
                if c[1] >= line:
                    # 'line' falls in the equal run before this chunk.
                    mend = c[1]
                    oend = c[3]
                    break
                elif c[2] >= line:
                    # 'line' falls inside this chunk.
                    mbegin, mend = c[1], c[2]
                    obegin, oend = c[3], c[4]
                    break
                else:
                    mbegin = c[2]
                    obegin = c[4]
            # Interpolate the corresponding line in the other pane.
            fraction = (line - mbegin) / ((mend - mbegin) or 1)
            other_line = (obegin + fraction * (oend - obegin))
            it = self.textbuffer[i].get_iter_at_line(int(other_line))
            val, height = self.textview[i].get_line_yrange(it)
            val -= (adj.get_page_size()) * syncpoint
            val += (other_line-int(other_line)) * height
            # Clamp to the adjustment's valid scroll range.
            val = min(max(val, adj.get_lower()),
                      adj.get_upper() - adj.get_page_size())
            adj.set_value(val)
            # If we just changed the central bar, make it the master
            if i == 1:
                master, line = 1, other_line
        self._sync_vscroll_lock = False
    for lm in self.linkmap:
        lm.queue_draw()
def set_num_panes(self, n):
    """Switch the comparison to *n* panes (1, 2 or 3).

    Shows/hides the per-pane widgets, wires the diffmaps and linkmaps to
    their panes, and refreshes labels and save-button sensitivity.
    No-op if *n* equals the current pane count or is out of range.
    """
    if n != self.num_panes and n in (1, 2, 3):
        self.num_panes = n
        for widget in (
                self.vbox[:n] + self.file_toolbar[:n] + self.diffmap[:n] +
                self.linkmap[:n - 1] + self.dummy_toolbar_linkmap[:n - 1] +
                self.dummy_toolbar_diffmap[:n - 1]):
            widget.show()
        for widget in (
                self.vbox[n:] + self.file_toolbar[n:] + self.diffmap[n:] +
                self.linkmap[n - 1:] + self.dummy_toolbar_linkmap[n - 1:] +
                self.dummy_toolbar_diffmap[n - 1:]):
            widget.hide()
        # Patch creation and document cycling need at least two panes.
        self.actiongroup.get_action("MakePatch").set_sensitive(n > 1)
        self.actiongroup.get_action("CycleDocuments").set_sensitive(n > 1)
        def coords_iter(i):
            # Factory producing a generator of (chunk-kind, y0, y1) tuples
            # in 0..1 buffer-relative coordinates, for the diffmap widgets.
            buf_index = 2 if i == 1 and self.num_panes == 3 else i
            get_end_iter = self.textbuffer[buf_index].get_end_iter
            get_iter_at_line = self.textbuffer[buf_index].get_iter_at_line
            get_line_yrange = self.textview[buf_index].get_line_yrange
            def coords_by_chunk():
                y, h = get_line_yrange(get_end_iter())
                max_y = float(y + h)
                for c in self.linediffer.single_changes(i):
                    y0, _ = get_line_yrange(get_iter_at_line(c[1]))
                    if c[1] == c[2]:
                        y, h = y0, 0
                    else:
                        y, h = get_line_yrange(get_iter_at_line(c[2] - 1))
                    yield c[0], y0 / max_y, (y + h) / max_y
            return coords_by_chunk
        # The two diffmaps flank the outermost panes.
        for (w, i) in zip(self.diffmap, (0, self.num_panes - 1)):
            scroll = self.scrolledwindow[i].get_vscrollbar()
            w.setup(scroll, coords_iter(i), [self.fill_colors, self.line_colors])
        for (w, i) in zip(self.linkmap, (0, self.num_panes - 2)):
            w.associate(self, self.textview[i], self.textview[i + 1])
        for i in range(self.num_panes):
            self.file_save_button[i].set_sensitive(
                self.textbuffer[i].data.modified)
        self.queue_draw()
        self.recompute_label()
def next_diff(self, direction, centered=False):
    """Move the cursor to the next/previous chunk and scroll it into view.

    *direction* is Gdk.ScrollDirection.DOWN for the next chunk, anything
    else for the previous one. *centered* scrolls with zero tolerance so
    the chunk lands mid-view.
    """
    target = (self.cursor.next if direction == Gdk.ScrollDirection.DOWN
              else self.cursor.prev)
    if target is None:
        return
    pane = self._get_focused_pane()
    if pane == -1:
        # No focus: default to the middle pane when there is one.
        if len(self.textview) > 1:
            pane = 1
        else:
            pane = 0
    chunk = self.linediffer.get_chunk(target, pane)
    if not chunk:
        return
    # Warp the cursor to the first line of next chunk
    buf = self.textbuffer[pane]
    if self.cursor.line != chunk[1]:
        buf.place_cursor(buf.get_iter_at_line(chunk[1]))
    tolerance = 0.0 if centered else 0.2
    self.textview[pane].scroll_to_mark(
        buf.get_insert(), tolerance, True, 0.5, 0.5)
def copy_chunk(self, src, dst, chunk, copy_up):
    """Copy the text of *chunk* from pane *src* into pane *dst*.

    *copy_up* inserts above the destination chunk, otherwise below.
    A fading highlight animation marks the inserted text.
    """
    b0, b1 = self.textbuffer[src], self.textbuffer[dst]
    start = b0.get_iter_at_line_or_eof(chunk[1])
    end = b0.get_iter_at_line_or_eof(chunk[2])
    t0 = text_type(b0.get_text(start, end, False), 'utf8')
    if copy_up:
        if chunk[2] >= b0.get_line_count() and \
                chunk[3] < b1.get_line_count():
            # TODO: We need to insert a linebreak here, but there is no
            # way to be certain what kind of linebreak to use.
            t0 = t0 + "\n"
        dst_start = b1.get_iter_at_line_or_eof(chunk[3])
        mark0 = b1.create_mark(None, dst_start, True)
        new_end = b1.insert_at_line(chunk[3], t0)
    else: # copy down
        dst_start = b1.get_iter_at_line_or_eof(chunk[4])
        mark0 = b1.create_mark(None, dst_start, True)
        new_end = b1.insert_at_line(chunk[4], t0)
    mark1 = b1.create_mark(None, new_end, True)
    # FIXME: If the inserted chunk ends up being an insert chunk, then
    # this animation is not visible; this happens often in three-way diffs
    rgba0 = self.fill_colors['insert'].copy()
    rgba1 = self.fill_colors['insert'].copy()
    rgba0.alpha = 1.0
    rgba1.alpha = 0.0
    # Fade from opaque to transparent over 500ms (duration in microseconds).
    anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 500000)
    self.animating_chunks[dst].append(anim)
def replace_chunk(self, src, dst, chunk):
    """Replace the *dst*-side text of *chunk* with the *src*-side text.

    Delete+insert are wrapped in a single user action so one undo step
    reverts the merge; a fading highlight marks the new text.
    """
    b0, b1 = self.textbuffer[src], self.textbuffer[dst]
    src_start = b0.get_iter_at_line_or_eof(chunk[1])
    src_end = b0.get_iter_at_line_or_eof(chunk[2])
    dst_start = b1.get_iter_at_line_or_eof(chunk[3])
    dst_end = b1.get_iter_at_line_or_eof(chunk[4])
    t0 = text_type(b0.get_text(src_start, src_end, False), 'utf8')
    mark0 = b1.create_mark(None, dst_start, True)
    self.on_textbuffer_begin_user_action()
    b1.delete(dst_start, dst_end)
    new_end = b1.insert_at_line(chunk[3], t0)
    self.on_textbuffer_end_user_action()
    mark1 = b1.create_mark(None, new_end, True)
    # FIXME: If the inserted chunk ends up being an insert chunk, then
    # this animation is not visible; this happens often in three-way diffs
    rgba0 = self.fill_colors['insert'].copy()
    rgba1 = self.fill_colors['insert'].copy()
    rgba0.alpha = 1.0
    rgba1.alpha = 0.0
    anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 500000)
    self.animating_chunks[dst].append(anim)
def delete_chunk(self, src, chunk):
    """Delete the text of *chunk* from pane *src*, with a fade animation."""
    b0 = self.textbuffer[src]
    it = b0.get_iter_at_line_or_eof(chunk[1])
    if chunk[2] >= b0.get_line_count():
        # Chunk reaches end-of-buffer: also remove the preceding newline
        # so no dangling blank line is left behind.
        it.backward_char()
    b0.delete(it, b0.get_iter_at_line_or_eof(chunk[2]))
    mark0 = b0.create_mark(None, it, True)
    mark1 = b0.create_mark(None, it, True)
    # TODO: Need a more specific colour here; conflict is wrong
    rgba0 = self.fill_colors['conflict'].copy()
    rgba1 = self.fill_colors['conflict'].copy()
    rgba0.alpha = 1.0
    rgba1.alpha = 0.0
    anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 500000)
    self.animating_chunks[src].append(anim)
def add_sync_point(self, action):
    """Add a synchronization point at the cursor of the focused pane.

    Sync points are grouped into tuples with one entry per pane; once a
    tuple is complete across all panes it constrains the differ. Adding
    any valid point disables live comparison updating.
    """
    pane = self._get_focused_pane()
    if pane == -1:
        return
    # Find a non-complete syncpoint, or create a new one
    if self.syncpoints and None in self.syncpoints[-1]:
        syncpoint = self.syncpoints.pop()
    else:
        syncpoint = [None] * self.num_panes
    cursor_it = self.textbuffer[pane].get_iter_at_mark(
        self.textbuffer[pane].get_insert())
    syncpoint[pane] = self.textbuffer[pane].create_mark(None, cursor_it)
    self.syncpoints.append(syncpoint)
    def make_line_retriever(pane, marks):
        # Return a closure yielding the mark's current line, so the differ
        # always sees up-to-date positions even after edits.
        buf = self.textbuffer[pane]
        mark = marks[pane]
        def get_line_for_mark():
            return buf.get_iter_at_mark(mark).get_line()
        return get_line_for_mark
    valid_points = [p for p in self.syncpoints if all(p)]
    if valid_points and self.num_panes == 2:
        self.linediffer.syncpoints = [
            ((make_line_retriever(1, p), make_line_retriever(0, p)), )
            for p in valid_points
        ]
    elif valid_points and self.num_panes == 3:
        # Three-way: pair the middle pane with each outer pane.
        self.linediffer.syncpoints = [
            ((make_line_retriever(1, p), make_line_retriever(0, p)),
             (make_line_retriever(1, p), make_line_retriever(2, p)))
            for p in valid_points
        ]
    if valid_points:
        for mgr in self.msgarea_mgr:
            msgarea = mgr.new_from_text_and_icon(
                Gtk.STOCK_DIALOG_INFO,
                _("Live comparison updating disabled"),
                _("Live updating of comparisons is disabled when "
                  "synchronization points are active. You can still "
                  "manually refresh the comparison, and live updates will "
                  "resume when synchronization points are cleared."))
            mgr.set_msg_id(FileDiff.MSG_SYNCPOINTS)
            msgarea.show_all()
    self.refresh_comparison()
def clear_sync_points(self, action):
    """Drop all synchronization points and return to live comparison."""
    self.syncpoints = []
    self.linediffer.syncpoints = []
    # Retract only the "live updating disabled" bars; leave other messages.
    for manager in self.msgarea_mgr:
        if manager.get_msg_id() == FileDiff.MSG_SYNCPOINTS:
            manager.clear()
    self.refresh_comparison()
| albfan/PatienceMeld | meld/filediff.py | Python | gpl-2.0 | 90,854 |
r"""
Packaging setup file for Pypi and installation via pip.
Notes
-----
#. Written by David C. Stauffer in January 2019.
"""
#%% Import
import os
from setuptools import setup
#%% Support functions - readme
def readme():
    r"""Open the README.rst file for the long package description.

    Returns
    -------
    str
        The full README text, decoded as UTF-8.
    """
    filename = os.path.join(os.path.dirname(__file__), 'README.rst')
    # Read as UTF-8 explicitly; the platform default encoding (e.g. cp1252
    # on Windows) can fail on non-ASCII characters in the README.
    with open(filename, encoding='utf-8') as file:
        return file.read()
#%% Support functions - get_version
def get_version():
    r"""Read the version information from the library's version module.

    Returns
    -------
    str
        Dotted version string, e.g. ``'1.2.3'``, parsed from the
        ``version_info = (…)`` line in ``dstauffman2/version.py``.

    Raises
    ------
    RuntimeError
        If no ``version_info`` line is found.
    """
    filename = os.path.join(os.path.dirname(__file__), 'dstauffman2', 'version.py')
    # Read as UTF-8 explicitly so parsing is independent of the platform
    # default encoding.
    with open(filename, encoding='utf-8') as file:
        text = file.read()
    for line in text.splitlines():
        if line.startswith('version_info = '):
            # Turn "version_info = (1, 2, 3)" into "1.2.3".
            return line.split('(')[1].split(')')[0].replace(', ', '.')
    raise RuntimeError('Unable to load version information.')
#%% Setup
# Package metadata and dependencies; version/description come from helpers above.
setup(
    name='dstauffman2',
    version=get_version(),
    # Fixed typo in the user-facing PyPI description: 'miscelllaneous'.
    description='Vaguely useful Python utilities, plus a playground for games and miscellaneous code.',
    long_description=readme(),
    keywords='dstauffman dstauffman2 games playground spyder configuration',
    url='https://github.com/dstauffman/dstauffman2',
    author='David C. Stauffer',
    author_email='[email protected]',
    license='LGPLv3',
    packages=['dstauffman2'],
    # Ship the PEP 561 marker so type checkers use the inline annotations.
    package_data={'dstauffman2': ['py.typed']},
    install_requires=[
        'h5py',
        'matplotlib',
        'numpy',
        'pandas',
        'PyQt5',
        'pytest',
        'scipy',
    ],
    python_requires='>=3.8',
    include_package_data=True,
    zip_safe=False)
| DStauffman/dstauffman2 | setup.py | Python | lgpl-3.0 | 1,638 |
# coding: utf-8
from __future__ import unicode_literals, absolute_import
from datetime import timedelta
from django.test import TestCase
from django.test.client import RequestFactory
from django.contrib.auth import get_user_model
from django.utils import timezone
from httmock import urlmatch, response, HTTMock
from common.tests import access_token_mock
from remind.serializers import RemindSerializer, TimestampField
from remind.models import Remind
# HTTMock handler intercepting WeChat's qrcode/create endpoint in tests.
@urlmatch(netloc=r'(.*\.)?api\.weixin\.qq\.com$', path='.*qrcode/create')
def create_qrcode_mock(url, request):
    """Return a canned qrcode-creation JSON payload instead of calling WeChat."""
    content = {
        'ticket': 'gQH47joAAAAAAAAAASxodHRwOi8vd2VpeGluLnFxLmNvbS9xL2taZ2Z3TVRtNzJXV1Brb3ZhYmJJAAIEZ23sUwMEmm3sUw==',
        'expires_in': 30,
        'url': 'http:\/\/weixin.qq.com\/q\/kZgfwMTm72WWPkovabbI',
    }
    headers = {
        'Content-Type': 'application/json'
    }
    return response(200, content, headers, request=request)
class RemindSerializerTestCase(TestCase):
    """Tests for RemindSerializer: defer updates, id format, default title
    and read-only field protection."""
    def setUp(self):
        # A request carrying a saved user, used as serializer context.
        self.request = RequestFactory().get('/')
        user = get_user_model()(openid='miao', nickname='abc')
        user.save()
        self.request.user = user
        self.r = Remind(time=timezone.now(), owner_id='miao', event='吃饭', desc='吃饭饭', done=True)
        self.r.save()
    def test_change_defer(self):
        # Changing 'defer' must reset the done flag and shift notify_time
        # by the deferral (in minutes) relative to the event time.
        update_data = {
            'title': self.r.event,
            'time': TimestampField().to_representation(self.r.time),
            'defer': -2*60
        }
        self.assertTrue(self.r.done)
        serializer = RemindSerializer(data=update_data, instance=self.r, partial=True)
        self.assertTrue(serializer.is_valid())
        serializer.save()
        self.assertFalse(self.r.done)
        self.assertEqual(self.r.defer, -2*60)
        self.assertEqual(self.r.notify_time, self.r.time+timedelta(minutes=self.r.defer))
        update_data['defer'] = 2*24*60
        serializer = RemindSerializer(data=update_data, instance=self.r, partial=True)
        self.assertTrue(serializer.is_valid())
        serializer.save()
        self.assertEqual(self.r.defer, 2*24*60)
    def test_uuid_format(self):
        # The serialized id should render as 32 word characters (hex UUID
        # without dashes); HTTP calls are mocked out.
        serializer = RemindSerializer(instance=self.r, context={'request': self.request})
        with HTTMock(access_token_mock, create_qrcode_mock):
            self.assertRegexpMatches(serializer.data['id'], r'\w{32}')
    def test_default_title(self):
        # A remind created without an event falls back to Remind.default_title.
        r = Remind(time=timezone.now(), owner_id='miao', desc='吃饭饭', done=True)
        r.save()
        serializer = RemindSerializer(instance=r, context={'request': self.request})
        with HTTMock(access_token_mock, create_qrcode_mock):
            self.assertEqual(serializer.data['title'], Remind.default_title)
    def test_read_only_fields(self):
        # id/owner are read-only and unknown keys are dropped, so none of
        # them may appear in validated_data.
        update_data = {
            'id': '123',
            'owner': {
                'id': 'miao'
            },
            'title': self.r.event,
            'time': TimestampField().to_representation(self.r.time),
            'aaa': 1
        }
        serializer = RemindSerializer(data=update_data, initial=self.r, partial=True)
        self.assertTrue(serializer.is_valid())
        self.assertNotIn('id', serializer.validated_data)
        self.assertNotIn('owner', serializer.validated_data)
        self.assertNotIn('aaa', serializer.validated_data)
self.assertNotIn('aaa', serializer.validated_data) | polyrabbit/WeCron | WeCron/remind/tests/test_serializers.py | Python | gpl-3.0 | 3,317 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2016 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
import pytest
from inspirehep.modules.search import IQ
def test_empty():
    """An empty query falls back to a zero-terms multi-match on all fields."""
    boosted_fields = [
        'title^3',
        'title.raw^10',
        'abstract^2',
        'abstract.raw^4',
        'author^10',
        'author.raw^15',
        'reportnumber^10',
        'eprint^10',
        'doi^10',
    ]
    expected = {
        'multi_match': {
            'zero_terms_query': 'all',
            'query': '',
            'fields': boosted_fields,
        }
    }
    assert IQ('').to_dict() == expected
def test_google_style():
    """A bare term becomes a multi-match across the default boosted fields."""
    boosted_fields = [
        'title^3',
        'title.raw^10',
        'abstract^2',
        'abstract.raw^4',
        'author^10',
        'author.raw^15',
        'reportnumber^10',
        'eprint^10',
        'doi^10',
    ]
    expected = {
        'multi_match': {
            'zero_terms_query': 'all',
            'query': 'kudenko',
            'fields': boosted_fields,
        }
    }
    assert IQ('kudenko').to_dict() == expected
@pytest.mark.xfail(reason='query is malformed, but user intent is clear')
def test_google_style_or_google_style():
    """Placeholder: the expected ES query for this input is not yet defined."""
    result = IQ('sungtae cho or 1301.7261').to_dict()
    assert result == {}
@pytest.mark.xfail(reason='query is malformed, but user intent is clear')
def test_google_style_and_not_collaboration():
    """Placeholder: the expected ES query for this input is not yet defined."""
    result = IQ("raffaele d'agnolo and not cn cms").to_dict()
    assert result == {}
@pytest.mark.xfail(reason='query is malformed, but user intent is clear')
def test_author():
    """Placeholder: the expected ES query for this input is not yet defined."""
    result = IQ('a kondrashuk').to_dict()
    assert result == {}
@pytest.mark.xfail(reason='BAI is not part of the mappings')
def test_author_bai():
    """Placeholder: BAI identifiers are not mapped yet."""
    result = IQ('a r.j.hill.1').to_dict()
    assert result == {}
@pytest.mark.xfail(reason='query is malformed, but user intent is clear')
def test_author_or_author():
    """Placeholder: the expected ES query for this input is not yet defined."""
    result = IQ('a fileviez perez,p or p. f. perez').to_dict()
    assert result == {}
@pytest.mark.xfail(reason='query is malformed, but user intent is clear')
def test_author_and_not_author():
    """Placeholder: the expected ES query for this input is not yet defined."""
    result = IQ('a espinosa,jose r and not a rodriguez espinosa').to_dict()
    assert result == {}
@pytest.mark.xfail(reason='query is malformed, but user intent is clear')
def test_author_and_not_type_code():
query = IQ('a nilles,h and not tc I')
expected = {}
result = query.to_dict()
assert expected == result
@pytest.mark.xfail(reason='query is malformed, but user intent is clear')
def test_author_or_author_and_not_collaborations_and_not_title_and_not_type_code():
    """Complex author OR query with several NOT clauses.

    Fix: the function previously lacked the ``test_`` prefix, so pytest
    never collected it and the xfail marker was dead weight.
    """
    query = IQ(
        'a rojo,j. or rojo-chacon,j. and not collaboration pierre auger '
        'and not collaboration auger and not t auger and tc p')
    expected = {}
    result = query.to_dict()
    assert expected == result
@pytest.mark.xfail(reason='query is malformed, but user intent is clear')
def test_exactauthor():
    """`ea <name>` exact-author query is not parsed yet."""
    assert IQ('ea wu, xing gang').to_dict() == {}
def test_abstract_colon_with_star_wildcard():
    """A `*` wildcard in `abstract:` maps to a wildcard query_string."""
    result = IQ('abstract: part*').to_dict()
    assert result == {
        'query_string': {
            'query': 'part*',
            'default_field': 'abstracts.value',
            'analyze_wildcard': True,
        },
    }
def test_author_colon():
    """`author:` matches either name variations or the full name."""
    name = 'vagenas'
    expected = {
        'bool': {
            'should': [
                {'match': {'authors.name_variations': name}},
                {'match': {'authors.full_name': name}},
            ],
        },
    }
    assert IQ('author: vagenas').to_dict() == expected
def test_author_colon_with_double_quotes():
    """A quoted author makes the name-variation match mandatory."""
    name = 'tachikawa, yuji'
    assert IQ('author:"tachikawa, yuji"').to_dict() == {
        'bool': {
            'must': [{'match': {'authors.name_variations': name}}],
            'should': [{'match': {'authors.full_name': name}}],
        },
    }
@pytest.mark.xfail(reason='BAI is not part of the mappings')
def test_author_colon_bai():
    """`author:` lookup by BAI identifier."""
    assert IQ('author:Y.Nomura.1').to_dict() == {}


@pytest.mark.xfail(reason='BAI is not part of the mappings')
def test_author_colon_bai_and_collection_colon():
    """BAI author ANDed with a collection filter."""
    assert IQ('author:E.Witten.1 AND collection:citeable').to_dict() == {}


@pytest.mark.xfail(reason='BAI is not part of the mappings')
def test_author_colon_bai_with_double_quotes_and_collection_colon():
    """Quoted BAI author ANDed with a collection filter."""
    assert IQ('author:"E.Witten.1" AND collection:citeable').to_dict() == {}


@pytest.mark.xfail(reason='BAI is not part of the mappings')
def test_author_colon_bai_and_collection_colon_and_cited_colon():
    """BAI author with collection and citation-count range filters."""
    assert IQ(
        'author:E.Witten.1 AND collection:citeable AND cited:500->1000000'
    ).to_dict() == {}


@pytest.mark.xfail(reason='BAI is not part of the mappings')
def test_author_colon_bai_with_double_quotes_and_collection_colon_and_cited_colon():
    """Quoted BAI author with collection and citation-count range filters."""
    assert IQ(
        'author:"E.Witten.1" AND collection:citeable AND cited:500->1000000'
    ).to_dict() == {}


@pytest.mark.xfail(reason='query is malformed, but user intent is clear')
def test_author_colon_or_eprint_without_keyword():
    """`author:` ORed with a bare eprint is not parsed yet."""
    assert IQ('author:"Takayanagi, Tadashi" or hep-th/0010101').to_dict() == {}
def test_author_colon_or_author_colon_or_title_colon_or_title_colon():
    """Parenthesised OR of author and title queries nests bool/should.

    The innermost bool ANDs the phrase author match with the free-text
    'hep-ph' term coming from ``(author:'H Okada' hep-ph)``.
    """
    query = IQ(
        "(author:'Hiroshi Okada' OR (author:'H Okada' hep-ph) OR "
        "title: 'Dark matter in supersymmetric U(1(B-L) model' OR "
        "title: 'Non-Abelian discrete symmetry for flavors')")
    expected = {
        'bool': {
            'should': [
                {
                    'bool': {
                        'should': [
                            {
                                'bool': {
                                    'should': [
                                        {
                                            # (author:'H Okada' hep-ph):
                                            # both clauses must match.
                                            'bool': {
                                                'must': [
                                                    {
                                                        'multi_match': {
                                                            'query': 'H Okada',
                                                            'type': 'phrase',
                                                            'fields': [
                                                                'authors.full_name',
                                                                'authors.alternative_name'
                                                            ]
                                                        }
                                                    },
                                                    {
                                                        # A bare term searches
                                                        # the global fulltext.
                                                        'multi_match': {
                                                            'query': 'hep-ph',
                                                            'fields': [
                                                                'global_fulltext'
                                                            ]
                                                        }
                                                    }
                                                ]
                                            }
                                        },
                                        {
                                            'multi_match': {
                                                'query': 'Hiroshi Okada',
                                                'type': 'phrase',
                                                'fields': [
                                                    'authors.full_name',
                                                    'authors.alternative_name'
                                                ],
                                            }
                                        }
                                    ]
                                }
                            },
                            {
                                'multi_match': {
                                    'query': 'Dark matter in supersymmetric U(1(B-L) model',
                                    'type': 'phrase',
                                    'fields': [
                                        'titles.title',
                                        'titles.title.raw^2',
                                        'title_translation.title',
                                        'title_variation',
                                        'title_translation.subtitle',
                                        'titles.subtitle'
                                    ]
                                }
                            }
                        ]
                    }
                },
                {
                    'multi_match': {
                        'query': 'Non-Abelian discrete symmetry for flavors',
                        'type': 'phrase',
                        'fields': [
                            'titles.title',
                            'titles.title.raw^2',
                            'title_translation.title',
                            'title_variation',
                            'title_translation.subtitle',
                            'titles.subtitle'
                        ],
                    }
                }
            ]
        }
    }
    result = query.to_dict()
    assert expected == result
@pytest.mark.xfail(reason='tracked in issue #817')
def test_citedby_colon():
    """`citedby:` keyword is not supported yet."""
    assert IQ('citedby:foobar').to_dict() == {}


@pytest.mark.xfail(reason='tracked in issue #817')
def test_citedby_colon_recid_colon():
    """`citedby:recid:` keyword is not supported yet."""
    assert IQ('citedby:recid:902780').to_dict() == {}


@pytest.mark.xfail(reason='tracked in issue #817')
def test_eprint_colon_with_arxiv():
    """`eprint:arxiv:` keyword is not supported yet."""
    assert IQ('eprint:arxiv:TODO').to_dict() == {}


@pytest.mark.xfail(reason='tracked in issue #817')
def test_eprint_colon_without_arxiv():
    """`eprint:` keyword is not supported yet."""
    assert IQ('eprint:TODO').to_dict() == {}


@pytest.mark.xfail(reason='query is malformed, but user intent is clear')
def test_exactauthor_colon():
    """`ea:` with an unquoted multi-word name is not parsed yet."""
    assert IQ('ea:matt visser').to_dict() == {}


@pytest.mark.xfail(reason='query is malformed, but user intent is clear')
def test_exactauthor_colon_and_collection_colon():
    """`ea:` combined with a collection filter is not parsed yet."""
    assert IQ('ea: matt visser AND collection:citeable').to_dict() == {}


@pytest.mark.xfail(reason='BAI is not part of the mappings')
def test_exactauthor_colon_bai():
    """`exactauthor:` lookup by BAI identifier."""
    assert IQ('exactauthor:J.Serra.3').to_dict() == {}
def test_field_code_colon():
    """`fc:` searches the field_code field."""
    assert IQ('fc: a').to_dict() == {
        'multi_match': {'query': 'a', 'fields': ['field_code']},
    }
@pytest.mark.xfail(reason='BAI is not part of the mappings')
def test_or_of_exactauthor_colon_queries():
    """OR of a BAI and a quoted exact-author."""
    assert IQ('exactauthor:X.Yin.1 or exactauthor:"Yin, Xi"').to_dict() == {}


@pytest.mark.xfail(reason='tracked in issue #817')
def test_fulltext_colon():
    """`fulltext:` keyword is not supported yet."""
    assert IQ('fulltext:TODO').to_dict() == {}


@pytest.mark.xfail(reason='tracked in issue #817')
def test_journal_colon():
    """`journal:` keyword is not supported yet."""
    assert IQ('journal:TODO').to_dict() == {}
def test_refersto_colon_recid_colon():
    """`refersto:recid:` matches against the cited records' recids."""
    assert IQ('refersto:recid:1286113').to_dict() == {
        'multi_match': {
            'query': '1286113',
            'fields': ['references.recid'],
        },
    }
@pytest.mark.xfail(reason='tracked in issue #817')
def test_topcite_colon():
    """`topcite:` keyword is not supported yet."""
    assert IQ('topcite:200+').to_dict() == {}
def test_type_code_colon():
    """`tc:` searches the collection field."""
    assert IQ('tc: l').to_dict() == {
        'multi_match': {'query': 'l', 'fields': ['collection']},
    }
def test_find_author_with_hash_wildcard():
    """A SPIRES `#` wildcard is translated to `*` over both author fields."""
    wildcard = {'analyze_wildcard': True, 'query': 'chkv*'}
    expected = {
        'bool': {
            'should': [
                {'query_string': dict(wildcard, default_field='authors.full_name')},
                {'query_string': dict(wildcard, default_field='authors.alternative_name')},
            ],
        },
    }
    assert IQ('find a chkv#').to_dict() == expected
def test_find_journal():
    """`find j` fans the query out over every publication_info subfield."""
    publication_fields = [
        'publication_info.recid',
        'publication_info.page_artid',
        'publication_info.journal_issue',
        'publication_info.conf_acronym',
        'publication_info.journal_title',
        'publication_info.reportnumber',
        'publication_info.confpaper_info',
        'publication_info.journal_volume',
        'publication_info.cnum',
        'publication_info.pubinfo_freetext',
        'publication_info.year_raw',
        'publication_info.isbn',
        'publication_info.note',
    ]
    assert IQ('find j "Phys.Rev.Lett.,105*"').to_dict() == {
        'multi_match': {
            'query': '"Phys.Rev.Lett.,105*"',
            'fields': publication_fields,
        },
    }
def test_find_exactauthor():
    """`find ea` queries the exact-author and both author name fields."""
    assert IQ('find ea witten, edward').to_dict() == {
        'multi_match': {
            'query': 'witten, edward',
            'fields': [
                'exactauthor.raw',
                'authors.full_name',
                'authors.alternative_name',
            ],
        },
    }
def test_find_exactauthor_not_affiliation_uppercase():
    """Keywords are case-insensitive; NOT AFF excludes affiliation matches."""
    author_clause = {
        'multi_match': {
            'query': 'RINALDI, MASSIMILIANO',
            'fields': [
                'exactauthor.raw',
                'authors.full_name',
                'authors.alternative_name',
            ],
        },
    }
    affiliation_clause = {
        'multi_match': {
            'query': 'SINCROTRONE TRIESTE',
            'fields': [
                'authors.affiliations.value',
                'corporate_author',
            ],
        },
    }
    result = IQ(
        'FIND EA RINALDI, MASSIMILIANO NOT AFF SINCROTRONE TRIESTE').to_dict()
    assert result == {
        'bool': {
            'must': [author_clause],
            'must_not': [affiliation_clause],
        },
    }
def test_find_author():
    """`find a` requires a name-variation match; full name is optional."""
    assert IQ('find a polchinski').to_dict() == {
        'bool': {
            'must': [{'match': {'authors.name_variations': 'polchinski'}}],
            'should': [{'match': {'authors.full_name': 'polchinski'}}],
        },
    }
def test_find_author_uppercase():
    """An uppercase FIND A behaves like the lowercase form."""
    name = 'W F CHANG'
    assert IQ('FIND A W F CHANG').to_dict() == {
        'bool': {
            'must': [{'match': {'authors.name_variations': name}}],
            'should': [{'match': {'authors.full_name': name}}],
        },
    }
@pytest.mark.xfail(reason='BAI is not part of the mappings')
def test_find_author_bai():
    """`find a` lookup by BAI identifier."""
    assert IQ('find a B.R.Safdi.1').to_dict() == {}
def test_find_author_and_date():
    """`find a hatta and date after 2000` ANDs the author match with a
    `gt` range over every date-bearing field; one matching field suffices.
    """
    query = IQ('find a hatta and date after 2000')
    expected = {
        "bool": {
            "minimum_should_match": 0,
            "should": [
                {
                    "match": {
                        "authors.full_name": "hatta"
                    }
                }
            ],
            "must": [
                {
                    "match": {
                        "authors.name_variations": "hatta"
                    }
                },
                {
                    # 'date after 2000' -> range queries over all date fields.
                    "bool": {
                        "minimum_should_match": 1,
                        "should": [
                            {
                                "bool": {
                                    "should": [
                                        {
                                            "bool": {
                                                "should": [
                                                    {
                                                        "range": {
                                                            "imprints.date": {
                                                                "gt": "2000"
                                                            }
                                                        }
                                                    },
                                                    {
                                                        "range": {
                                                            "preprint_date": {
                                                                "gt": "2000"
                                                            }
                                                        }
                                                    }
                                                ]
                                            }
                                        },
                                        {
                                            "range": {
                                                "thesis.date": {
                                                    "gt": "2000"
                                                }
                                            }
                                        }
                                    ]
                                }
                            },
                            {
                                "range": {
                                    "publication_info.year": {
                                        "gt": "2000"
                                    }
                                }
                            }
                        ]
                    }
                }
            ]
        }
    }
    result = query.to_dict()
    assert expected == result
def test_find_author_or_author():
    """`or` of two author queries becomes a bool/should of author clauses."""
    def author_clause(name):
        # Each author gets the usual must-variation / should-full-name bool.
        return {
            'bool': {
                'must': [{'match': {'authors.name_variations': name}}],
                'should': [{'match': {'authors.full_name': name}}],
            },
        }

    expected = {
        'bool': {
            'should': [
                author_clause('gersdorff, g'),
                author_clause('von gersdorff, g'),
            ],
        },
    }
    assert IQ('find a gersdorff, g or a von gersdorff, g').to_dict() == expected
def test_find_author_not_author_not_author():
    """`f a X not Y not Z`: each NOT-ed author becomes a must_not bool
    clause with the same variation/full-name structure as a positive one.
    """
    query = IQ('f a ostapchenko not olinto not haungs')
    expected = {
        "bool": {
            "minimum_should_match": 0,
            "must": [
                {
                    "match": {
                        "authors.name_variations": "ostapchenko"
                    }
                }
            ],
            "must_not": [
                {
                    "bool": {
                        "should": [
                            {
                                "match": {
                                    "authors.full_name": "olinto"
                                }
                            }
                        ],
                        "must": [
                            {
                                "match": {
                                    "authors.name_variations": "olinto"
                                }
                            }
                        ]
                    }
                },
                {
                    "bool": {
                        "must": [
                            {
                                "match": {
                                    "authors.name_variations": "haungs"
                                }
                            }
                        ],
                        "should": [
                            {
                                "match": {
                                    "authors.full_name": "haungs"
                                }
                            }
                        ]
                    }
                }
            ],
            "should": [
                {
                    "match": {
                        "authors.full_name": "ostapchenko"
                    }
                }
            ]
        }
    }
    result = query.to_dict()
    assert expected == result
@pytest.mark.xfail(reason='tracked in issue #817')
def test_find_caption():
    """Caption search is not supported yet."""
    assert IQ('Diagram for the fermion flow violating process').to_dict() == {}


@pytest.mark.xfail(reason='tracked in issue #817')
def test_find_country_code():
    """`find cc` country-code search is not supported yet."""
    assert IQ('find cc italy').to_dict() == {}


@pytest.mark.xfail(reason='today must be converted to an actual date')
def test_find_date():
    """`date > today` needs the symbolic date resolved first."""
    assert IQ('fin date > today').to_dict() == {}
def test_find_field_code():
    """`find fc` searches the field_code field."""
    assert IQ('find fc a').to_dict() == {
        'multi_match': {'query': 'a', 'fields': ['field_code']},
    }
@pytest.mark.xfail(reason='tracked in issue #817')
def test_find_report():
    """`find r` report-number wildcard search is not supported yet."""
    assert IQ('find r atlas-conf-*').to_dict() == {}
def test_find_type_code():
    """`find tc` searches the collection field."""
    assert IQ('find tc book').to_dict() == {
        'multi_match': {'query': 'book', 'fields': ['collection']},
    }
| nikpap/inspire-next | tests/unit/search/test_search_query.py | Python | gpl-2.0 | 25,098 |
# Capture a screenshot of a web page with Chrome via selenium, then
# display it in a Tkinter window.
#
# Fixes: removed the `while(1): pass` loop that made everything after it
# unreachable; import ImageTk (PhotoImage lives there, not in Image);
# create the Tk root BEFORE ImageTk.PhotoImage (Tkinter requires an
# existing root); use the defined names `Label`/`root`/`img_tk` instead
# of the undefined `tk`/`window` and the raw PIL `img`; dropped the
# `root.destroy()` after mainloop(), which raised on an already-closed
# window.
from selenium import webdriver
from Tkinter import *
from PIL import Image, ImageTk

driver = webdriver.Chrome()
driver.set_window_size(500, 500)
driver.get('https://www.google.com')
try:
    driver.save_screenshot('temp.png')
finally:
    # Always release the browser, even if the screenshot fails.
    driver.quit()

root = Tk()
root.title('Ircbot')
root.geometry('500x500')
# Keep a reference to the PhotoImage so it is not garbage-collected
# while the label still displays it.
img_tk = ImageTk.PhotoImage(Image.open('temp.png'))
panel = Label(root, image=img_tk)
panel.pack(side='bottom', fill='both', expand='yes')
root.mainloop()
| jbzdarkid/Random | webbrower.py | Python | apache-2.0 | 529 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.