| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
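The rows below follow this schema, one source file per row. As a minimal sketch of how the columns can be inspected (assuming the rows are stored as a Parquet file; the filename here is hypothetical), pandas is enough:

```python
# Hypothetical filename; point this at wherever the rows are actually stored.
import pandas as pd

df = pd.read_parquet("code_quality_signals.parquet")
print(df.dtypes)  # should mirror the column/dtype table above
print(df[["max_stars_repo_name", "size", "avg_line_length"]].head())
```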
hexsha: 6ae5c492e4c7a58f8381ae12e47edd808dc70752 | size: 1,912 | ext: py | lang: Python
max_stars: repo arsfeld/fog-web2py @ 32263a03d4183dcaf7537c87edcb4e574d4bec6e, path gluon/contrib/memcache/__init__.py, licenses ["BSD-3-Clause"], count null, events null to null
max_issues: repo arsfeld/fog-web2py @ 32263a03d4183dcaf7537c87edcb4e574d4bec6e, path gluon/contrib/memcache/__init__.py, licenses ["BSD-3-Clause"], count null, events null to null
max_forks: repo arsfeld/fog-web2py @ 32263a03d4183dcaf7537c87edcb4e574d4bec6e, path gluon/contrib/memcache/__init__.py, licenses ["BSD-3-Clause"], count 1, events 2019-03-13T08:20:25.000Z to 2019-03-13T08:20:25.000Z
content:
from gluon.contrib.memcache.memcache import Client
import time
"""
example of usage:
cache.memcache=MemcacheClient(request,['127.0.0.1:11211'],debug=True)
"""
import cPickle as pickle
import thread
locker = thread.allocate_lock()
def MemcacheClient(*a, **b):
locker.acquire()
if not hasattr(MemcacheClient, '__mc_instance'):
MemcacheClient.__mc_instance = _MemcacheClient(*a, **b)
locker.release()
return MemcacheClient.__mc_instance
class _MemcacheClient(Client):
def __init__(self, request, servers, debug=0, pickleProtocol=0,
pickler=pickle.Pickler, unpickler=pickle.Unpickler,
pload=None, pid=None):
self.request=request
Client.__init__(self,servers,debug,pickleProtocol,
pickler,unpickler,pload,pid)
def __call__(self,key,f,time_expire=300):
#key=self.__keyFormat__(key)
value=None
obj=self.get(key)
if obj:
value=obj
elif f is None:
if obj: self.delete(key)
else:
value=f()
self.set(key,value,time_expire)
return value
def increment(self,key,value=1,time_expire=300):
newKey=self.__keyFormat__(key)
obj=self.get(newKey)
if obj:
return Client.incr(self,newKey,value)
else:
self.set(newKey,value,time_expire)
return value
def set(self,key,value,time_expire=300):
newKey = self.__keyFormat__(key)
return Client.set(self,newKey,value,time_expire)
def get(self,key):
newKey = self.__keyFormat__(key)
return Client.get(self,newKey)
def delete(self,key):
newKey = self.__keyFormat__(key)
return Client.delete(self,newKey)
def __keyFormat__(self,key):
return '%s/%s' % (self.request.application,key.replace(' ','_'))
| 28.537313
| 72
| 0.620816
| 226
| 1,912
| 5.00885
| 0.29646
| 0.037102
| 0.070671
| 0.077739
| 0.196113
| 0.196113
| 0.134276
| 0.072438
| 0
| 0
| 0
| 0.016417
| 0.267259
| 1,912
| 66
| 73
| 28.969697
| 0.791577
| 0.014121
| 0
| 0.208333
| 0
| 0
| 0.011167
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.083333
| 0.020833
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
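The trailing numbers of each row hold the per-file statistics and qsc_* quality signals in the column order given in the schema; for this first row the stored values include 1,912 characters, 66 lines, and a mean line length of roughly 29. A rough sketch of how a few of the simpler signals could be recomputed from `content` follows; the exact definitions behind the stored values are not documented here, so whitespace and trailing-newline handling may differ slightly:

```python
# Rough sketch only: recompute a few simple signals from a row's `content`.
# The dataset's exact definitions are not documented here, so results may
# differ slightly from the stored values.
def simple_signals(content: str) -> dict:
    lines = content.splitlines()
    line_lengths = [len(line) for line in lines]
    n_chars = len(content)
    return {
        "qsc_code_num_chars": n_chars,
        "qsc_code_num_lines": len(lines),
        "qsc_code_num_chars_line_max": max(line_lengths, default=0),
        "qsc_code_num_chars_line_mean": sum(line_lengths) / max(len(lines), 1),
        "qsc_code_frac_chars_whitespace": sum(c.isspace() for c in content) / max(n_chars, 1),
    }
```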
hexsha: 6ae7079318bfafa24286324b4e5be07089c8ccfb | size: 11,135 | ext: py | lang: Python
max_stars: repo wannaphong/deepcut @ e4f7779caa087c5ffbad3bc4e88f919e300d020c, path deepcut/deepcut.py, licenses ["MIT"], count 17, events 2020-10-06T12:35:19.000Z to 2021-11-19T07:33:15.000Z
max_issues: repo wannaphong/deepcut @ e4f7779caa087c5ffbad3bc4e88f919e300d020c, path deepcut/deepcut.py, licenses ["MIT"], count 3, events 2020-10-07T06:29:33.000Z to 2020-10-23T15:21:09.000Z
max_forks: repo wannaphong/deepcut @ e4f7779caa087c5ffbad3bc4e88f919e300d020c, path deepcut/deepcut.py, licenses ["MIT"], count 1, events 2020-10-06T13:16:54.000Z to 2020-10-06T13:16:54.000Z
content:
#!/usr/bin/env python
# encoding: utf-8
import numbers
import os
import re
import sys
from itertools import chain
import numpy as np
import scipy.sparse as sp
import six
import pickle
from .model import get_convo_nn2
from .stop_words import THAI_STOP_WORDS
from .utils import CHAR_TYPES_MAP, CHARS_MAP, create_feature_array
MODULE_PATH = os.path.dirname(__file__)
WEIGHT_PATH = os.path.join(MODULE_PATH, 'weight', 'cnn_without_ne_ab.h5')
TOKENIZER = None
def tokenize(text, custom_dict=None):
"""
Tokenize given Thai text string
Input
=====
text: str, Thai text string
custom_dict: str (or list), path to customized dictionary file
It keeps the tokenizer from wrongly splitting words that appear in the given dictionary.
The file should contain custom words separated by line.
Alternatively, you can provide list of custom words too.
Output
======
tokens: list, list of tokenized words
Example
=======
>> deepcut.tokenize('ตัดคำได้ดีมาก')
>> ['ตัดคำ','ได้','ดี','มาก']
"""
global TOKENIZER
if not TOKENIZER:
TOKENIZER = DeepcutTokenizer()
return TOKENIZER.tokenize(text, custom_dict=custom_dict)
def _custom_dict(word, text, word_end):
word_length = len(word)
initial_loc = 0
while True:
try:
start_char = re.search(word, text).start()
first_char = start_char + initial_loc
last_char = first_char + word_length - 1
initial_loc += start_char + word_length
text = text[start_char + word_length:]
word_end[first_char:last_char] = (word_length - 1) * [0]
word_end[last_char] = 1
except:
break
return word_end
def _document_frequency(X):
"""
Count the number of non-zero values for each feature in sparse X.
"""
if sp.isspmatrix_csr(X):
return np.bincount(X.indices, minlength=X.shape[1])
return np.diff(sp.csc_matrix(X, copy=False).indptr)
def _check_stop_list(stop):
"""
Check stop words list
ref: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py#L87-L95
"""
if stop == "thai":
return THAI_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
# assume it's a collection
return frozenset(stop)
def load_model(file_path):
"""
Load saved pickle file of DeepcutTokenizer
Parameters
==========
file_path: str, path to saved model from ``save_model`` method in DeepcutTokenizer
"""
tokenizer = pickle.load(open(file_path, 'rb'))
tokenizer.model = get_convo_nn2()
tokenizer.model = tokenizer.model.load_weights(WEIGHT_PATH)
return tokenizer
class DeepcutTokenizer(object):
"""
Class for tokenizing given Thai text documents using deepcut library
Parameters
==========
ngram_range : tuple, tuple for ngram range for vocabulary, (1, 1) for unigram
and (1, 2) for bigram
stop_words : list or set, list or set of stop words to be removed
if None, max_df can be set to value [0.7, 1.0) to automatically remove
vocabulary. If using "thai", this will use list of pre-populated stop words
max_features : int or None, if provided, only consider number of vocabulary
ordered by term frequencies
max_df : float in range [0.0, 1.0] or int, default=1.0
ignore terms that have a document frequency higher than the given threshold
min_df : float in range [0.0, 1.0] or int, default=1
ignore terms that have a document frequency lower than the given threshold
dtype : type, optional
Example
=======
raw_documents = ['ฉันอยากกินข้าวของฉัน',
'ฉันอยากกินไก่',
'อยากนอนอย่างสงบ']
tokenizer = DeepcutTokenizer(ngram_range=(1, 1))
X = tokenizer.fit_tranform(raw_documents) # document-term matrix in sparse CSR format
>> X.todense()
>> [[0, 0, 1, 0, 1, 0, 2, 1],
[0, 1, 1, 0, 1, 0, 1, 0],
[1, 0, 0, 1, 1, 1, 0, 0]]
>> tokenizer.vocabulary_
>> {'นอน': 0, 'ไก่': 1, 'กิน': 2, 'อย่าง': 3, 'อยาก': 4, 'สงบ': 5, 'ฉัน': 6, 'ข้าว': 7}
"""
def __init__(self, ngram_range=(1, 1), stop_words=None,
max_df=1.0, min_df=1, max_features=None, dtype=np.dtype('float64')):
self.model = get_convo_nn2()
self.model.load_weights(WEIGHT_PATH)
self.vocabulary_ = {}
self.ngram_range = ngram_range
self.dtype = dtype
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
self.stop_words = _check_stop_list(stop_words)
def _word_ngrams(self, tokens):
"""
Turn tokens into a tokens of n-grams
ref: https://github.com/scikit-learn/scikit-learn/blob/ef5cb84a/sklearn/feature_extraction/text.py#L124-L153
"""
# handle stop words
if self.stop_words is not None:
tokens = [w for w in tokens if w not in self.stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
# no need to do any slicing for unigrams
# just iterate through the original tokens
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
# bind method outside of loop to reduce overhead
tokens_append = tokens.append
space_join = " ".join
for n in range(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in range(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i: i + n]))
return tokens
def _limit_features(self, X, vocabulary,
high=None, low=None, limit=None):
"""Remove too rare or too common features.
ref: https://github.com/scikit-learn/scikit-learn/blob/ef5cb84a/sklearn/feature_extraction/text.py#L734-L773
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
tfs = np.asarray(X.sum(axis=0)).ravel()
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(vocabulary.items()):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def transform(self, raw_documents, new_document=False):
"""
raw_documents: list, list of new documents to be transformed
new_document: bool, if True, assume new documents and build a new self.vocabulary_,
if False, use the previous self.vocabulary_
"""
n_doc = len(raw_documents)
tokenized_documents = []
for doc in raw_documents:
tokens = tokenize(doc) # method in this file
tokens = self._word_ngrams(tokens)
tokenized_documents.append(tokens)
if new_document:
self.vocabulary_ = {v: k for k, v in enumerate(set(chain.from_iterable(tokenized_documents)))}
values, row_indices, col_indices = [], [], []
for r, tokens in enumerate(tokenized_documents):
tokens = self._word_ngrams(tokens)
feature = {}
for token in tokens:
word_index = self.vocabulary_.get(token)
if word_index is not None:
if word_index not in feature.keys():
feature[word_index] = 1
else:
feature[word_index] += 1
for c, v in feature.items():
values.append(v)
row_indices.append(r)
col_indices.append(c)
# document-term matrix in CSR format
X = sp.csr_matrix((values, (row_indices, col_indices)),
shape=(n_doc, len(self.vocabulary_)),
dtype=self.dtype)
# truncate vocabulary by max_df and min_df
if new_document:
max_df = self.max_df
min_df = self.min_df
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, _ = self._limit_features(X, self.vocabulary_,
max_doc_count,
min_doc_count,
self.max_features)
return X
def fit_tranform(self, raw_documents):
"""
Transform given list of raw_documents to document-term matrix in
sparse CSR format (see scipy)
"""
X = self.transform(raw_documents, new_document=True)
return X
def tokenize(self, text, custom_dict=None):
n_pad = 21
if not text:
return [''] # case of empty string
if isinstance(text, str) and sys.version_info.major == 2:
text = text.decode('utf-8')
x_char, x_type = create_feature_array(text, n_pad=n_pad)
word_end = []
# Fix thread-related issue in Keras + TensorFlow + Flask async environment
# ref: https://github.com/keras-team/keras/issues/2397
y_predict = self.model.predict([x_char, x_type])
c = [i[0] for i in y_predict.tolist()]
return list(zip(list(text),c))
def save_model(self, file_path):
"""
Save tokenizer to pickle format
"""
self.model = None # set model to None to successfully save the model
with open(file_path, 'wb') as f:
pickle.dump(self, f)
| 34.367284
| 116
| 0.587068
| 1,495
| 11,135
| 4.218729
| 0.248161
| 0.011892
| 0.00333
| 0.010782
| 0.108768
| 0.073411
| 0.065166
| 0.042334
| 0.042334
| 0.035516
| 0
| 0.015395
| 0.317467
| 11,135
| 324
| 117
| 34.367284
| 0.811711
| 0.29762
| 0
| 0.051136
| 0
| 0
| 0.030397
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0
| 0.068182
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
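The qsc_* columns are natural filter keys for screening files like the two rows above. As an illustrative sketch (assuming `df` is the DataFrame loaded earlier; the thresholds are arbitrary assumptions, not the rules used to build this data), rows can be filtered like this:

```python
# Illustrative thresholds only; not the dataset's actual filtering rules.
keep = (
    (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
    & (df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.5)
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
)
filtered = df[keep]
print(f"kept {len(filtered)} of {len(df)} rows")
```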
hexsha: 6ae71d1265e8389449c1186df9eae1ba04f43f40 | size: 4,824 | ext: py | lang: Python
max_stars: repo djiajunustc/spconv @ 647927ce6b64dc51fbec4eb50c7194f8ca5007e5, path spconv/utils/__init__.py, licenses ["Apache-2.0"], count null, events null to null
max_issues: repo djiajunustc/spconv @ 647927ce6b64dc51fbec4eb50c7194f8ca5007e5, path spconv/utils/__init__.py, licenses ["Apache-2.0"], count null, events null to null
max_forks: repo djiajunustc/spconv @ 647927ce6b64dc51fbec4eb50c7194f8ca5007e5, path spconv/utils/__init__.py, licenses ["Apache-2.0"], count null, events null to null
content:
# Copyright 2021 Yan Yan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from cumm import tensorview as tv
from contextlib import AbstractContextManager
from spconv.cppconstants import CPU_ONLY_BUILD
from spconv.core_cc.csrc.utils.boxops import BoxOps
from spconv.core_cc.csrc.sparse.all.ops_cpu1d import Point2VoxelCPU as Point2VoxelCPU1d
from spconv.core_cc.csrc.sparse.all.ops_cpu2d import Point2VoxelCPU as Point2VoxelCPU2d
from spconv.core_cc.csrc.sparse.all.ops_cpu3d import Point2VoxelCPU as Point2VoxelCPU3d
from spconv.core_cc.csrc.sparse.all.ops_cpu4d import Point2VoxelCPU as Point2VoxelCPU4d
if not CPU_ONLY_BUILD:
from spconv.core_cc.csrc.sparse.all.ops1d import Point2Voxel as Point2VoxelGPU1d
from spconv.core_cc.csrc.sparse.all.ops2d import Point2Voxel as Point2VoxelGPU2d
from spconv.core_cc.csrc.sparse.all.ops3d import Point2Voxel as Point2VoxelGPU3d
from spconv.core_cc.csrc.sparse.all.ops4d import Point2Voxel as Point2VoxelGPU4d
class nullcontext(AbstractContextManager):
"""Context manager that does no additional processing.
Used as a stand-in for a normal context manager, when a particular
block of code is only sometimes used with a normal context manager:
cm = optional_cm if condition else nullcontext()
with cm:
# Perform operation, using optional_cm if condition is True
"""
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *excinfo):
pass
def rbbox_iou(box_corners: np.ndarray, qbox_corners: np.ndarray,
standup_iou: np.ndarray, standup_thresh: float):
if not BoxOps.has_boost():
raise NotImplementedError(
"this op require spconv built with boost, download boost, export BOOST_ROOT and rebuild."
)
N = box_corners.shape[0]
K = qbox_corners.shape[0]
overlap = np.zeros((N, K), dtype=box_corners.dtype)
BoxOps.rbbox_iou(tv.from_numpy(box_corners), tv.from_numpy(qbox_corners),
tv.from_numpy(standup_iou), tv.from_numpy(overlap),
standup_thresh, False)
return overlap
def rbbox_intersection(box_corners: np.ndarray, qbox_corners: np.ndarray,
standup_iou: np.ndarray, standup_thresh: float):
if not BoxOps.has_boost():
raise NotImplementedError(
"this op require spconv built with boost, download boost, export BOOST_ROOT and rebuild."
)
N = box_corners.shape[0]
K = qbox_corners.shape[0]
overlap = np.zeros((N, K), dtype=box_corners.dtype)
BoxOps.rbbox_iou(tv.from_numpy(box_corners), tv.from_numpy(qbox_corners),
tv.from_numpy(standup_iou), tv.from_numpy(overlap),
standup_thresh, True)
return overlap
def rbbox_iou_loss(box_corners: np.ndarray, qbox_corners: np.ndarray):
if not BoxOps.has_boost():
raise NotImplementedError(
"this op require spconv built with boost, download boost, export BOOST_ROOT and rebuild."
)
N = box_corners.shape[0]
overlap = np.zeros((N, ), dtype=box_corners.dtype)
BoxOps.rbbox_iou_aligned(tv.from_numpy(box_corners),
tv.from_numpy(qbox_corners),
tv.from_numpy(overlap), False)
return overlap
def non_max_suppression_cpu(boxes: np.ndarray,
order: np.ndarray,
thresh: float,
eps: float = 0.0):
return BoxOps.non_max_suppression_cpu(tv.from_numpy(boxes),
tv.from_numpy(order), thresh, eps)
def rotate_non_max_suppression_cpu(boxes: np.ndarray, order: np.ndarray,
standup_iou: np.ndarray, thresh: float):
if not BoxOps.has_boost():
raise NotImplementedError(
"this op require spconv built with boost, download boost, export BOOST_ROOT and rebuild."
)
return BoxOps.rotate_non_max_suppression_cpu(tv.from_numpy(boxes),
tv.from_numpy(order),
tv.from_numpy(standup_iou),
thresh)
| 41.230769
| 101
| 0.676824
| 631
| 4,824
| 4.998415
| 0.277338
| 0.03234
| 0.055802
| 0.045656
| 0.532023
| 0.52156
| 0.515536
| 0.469562
| 0.396322
| 0.396322
| 0
| 0.012983
| 0.249585
| 4,824
| 116
| 102
| 41.586207
| 0.858287
| 0.177446
| 0
| 0.378378
| 0
| 0
| 0.088662
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0.013514
| 0.175676
| 0.027027
| 0.378378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6ae7a32e4ed90bb4e297477064266c060efd4768 | size: 5,346 | ext: py | lang: Python
max_stars: repo justremotephone/android_external_chromium_org @ 246856e61da7acf5494076c74198f2aea894a721, path build/android/gyp/lint.py, licenses ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], count 2, events 2019-01-16T03:57:28.000Z to 2021-01-23T15:29:45.000Z
max_issues: repo justremotephone/android_external_chromium_org @ 246856e61da7acf5494076c74198f2aea894a721, path build/android/gyp/lint.py, licenses ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], count 1, events 2018-02-10T21:00:08.000Z to 2018-03-20T05:09:50.000Z
max_forks: repo justremotephone/android_external_chromium_org @ 246856e61da7acf5494076c74198f2aea894a721, path build/android/gyp/lint.py, licenses ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], count 1, events 2020-11-04T07:24:13.000Z to 2020-11-04T07:24:13.000Z
content:
#!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Android's lint tool."""
import optparse
import os
import sys
from xml.dom import minidom
from util import build_utils
_SRC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', '..'))
def _RunLint(lint_path, config_path, processed_config_path, manifest_path,
result_path, product_dir, src_dirs, classes_dir):
def _RelativizePath(path):
"""Returns relative path to top-level src dir.
Args:
path: A path relative to cwd.
"""
return os.path.relpath(os.path.abspath(path), _SRC_ROOT)
def _ProcessConfigFile():
if not build_utils.IsTimeStale(processed_config_path, [config_path]):
return
with open(config_path, 'rb') as f:
content = f.read().replace(
'PRODUCT_DIR', _RelativizePath(product_dir))
with open(processed_config_path, 'wb') as f:
f.write(content)
def _ProcessResultFile():
with open(result_path, 'rb') as f:
content = f.read().replace(
_RelativizePath(product_dir), 'PRODUCT_DIR')
with open(result_path, 'wb') as f:
f.write(content)
def _ParseAndShowResultFile():
dom = minidom.parse(result_path)
issues = dom.getElementsByTagName('issue')
print >> sys.stderr
for issue in issues:
issue_id = issue.attributes['id'].value
severity = issue.attributes['severity'].value
message = issue.attributes['message'].value
location_elem = issue.getElementsByTagName('location')[0]
path = location_elem.attributes['file'].value
line = location_elem.getAttribute('line')
if line:
error = '%s:%s %s: %s [%s]' % (path, line, severity, message,
issue_id)
else:
# Issues in class files don't have a line number.
error = '%s %s: %s [%s]' % (path, severity, message, issue_id)
print >> sys.stderr, error
for attr in ['errorLine1', 'errorLine2']:
error_line = issue.getAttribute(attr)
if error_line:
print >> sys.stderr, error_line
return len(issues)
_ProcessConfigFile()
cmd = [
lint_path, '-Werror', '--exitcode', '--showall',
'--config', _RelativizePath(processed_config_path),
'--classpath', _RelativizePath(classes_dir),
'--xml', _RelativizePath(result_path),
]
for src in src_dirs:
cmd.extend(['--sources', _RelativizePath(src)])
cmd.append(_RelativizePath(os.path.join(manifest_path, os.pardir)))
if os.path.exists(result_path):
os.remove(result_path)
try:
build_utils.CheckOutput(cmd, cwd=_SRC_ROOT)
except build_utils.CalledProcessError:
# There is a problem with lint usage
if not os.path.exists(result_path):
raise
# There are actual lint issues
else:
num_issues = _ParseAndShowResultFile()
_ProcessResultFile()
msg = ('\nLint found %d new issues.\n'
' - For full explanation refer to %s\n'
' - Wanna suppress these issues?\n'
' 1. Read comment in %s\n'
' 2. Run "python %s %s"\n' %
(num_issues,
_RelativizePath(result_path),
_RelativizePath(config_path),
_RelativizePath(os.path.join(_SRC_ROOT, 'build', 'android',
'lint', 'suppress.py')),
_RelativizePath(result_path)))
print >> sys.stderr, msg
# Lint errors do not fail the build.
return 0
return 0
def main():
parser = optparse.OptionParser()
parser.add_option('--lint-path', help='Path to lint executable.')
parser.add_option('--config-path', help='Path to lint suppressions file.')
parser.add_option('--processed-config-path',
help='Path to processed lint suppressions file.')
parser.add_option('--manifest-path', help='Path to AndroidManifest.xml')
parser.add_option('--result-path', help='Path to XML lint result file.')
parser.add_option('--product-dir', help='Path to product dir.')
parser.add_option('--src-dirs', help='Directories containing java files.')
parser.add_option('--classes-dir', help='Directory containing class files.')
parser.add_option('--stamp', help='Path to touch on success.')
parser.add_option('--enable', action='store_true',
help='Run lint instead of just touching stamp.')
options, _ = parser.parse_args()
build_utils.CheckOptions(
options, parser, required=['lint_path', 'config_path',
'processed_config_path', 'manifest_path',
'result_path', 'product_dir', 'src_dirs',
'classes_dir'])
src_dirs = build_utils.ParseGypList(options.src_dirs)
rc = 0
if options.enable:
rc = _RunLint(options.lint_path, options.config_path,
options.processed_config_path,
options.manifest_path, options.result_path,
options.product_dir, src_dirs, options.classes_dir)
if options.stamp and not rc:
build_utils.Touch(options.stamp)
return rc
if __name__ == '__main__':
sys.exit(main())
| 33.4125
| 78
| 0.62963
| 648
| 5,346
| 5
| 0.294753
| 0.04321
| 0.046296
| 0.021605
| 0.147222
| 0.107407
| 0.085802
| 0.085802
| 0.053086
| 0.053086
| 0
| 0.002982
| 0.247288
| 5,346
| 159
| 79
| 33.622642
| 0.802187
| 0.081556
| 0
| 0.070796
| 0
| 0
| 0.185937
| 0.00902
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053097
| false
| 0
| 0.044248
| 0
| 0.150442
| 0.035398
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6ae97714d8f4b22a1d08d058e87732477cbb19c0 | size: 9,424 | ext: py | lang: Python
max_stars: repo snu5mumr1k/clif @ 3a907dd7b0986f2b3306c88503d414f4d4f963ae, path clif/pybind11/generator.py, licenses ["Apache-2.0"], count null, events null to null
max_issues: repo snu5mumr1k/clif @ 3a907dd7b0986f2b3306c88503d414f4d4f963ae, path clif/pybind11/generator.py, licenses ["Apache-2.0"], count null, events null to null
max_forks: repo snu5mumr1k/clif @ 3a907dd7b0986f2b3306c88503d414f4d4f963ae, path clif/pybind11/generator.py, licenses ["Apache-2.0"], count null, events null to null
content:
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates pybind11 bindings code."""
from typing import Dict, Generator, List, Text, Set
from clif.protos import ast_pb2
from clif.pybind11 import classes
from clif.pybind11 import enums
from clif.pybind11 import function
from clif.pybind11 import function_lib
from clif.pybind11 import type_casters
from clif.pybind11 import utils
I = utils.I
class ModuleGenerator(object):
"""A class that generates pybind11 bindings code from CLIF ast."""
def __init__(self, ast: ast_pb2.AST, module_name: str, header_path: str,
include_paths: List[str]):
self._ast = ast
self._module_name = module_name
self._header_path = header_path
self._include_paths = include_paths
self._unique_classes = {}
def generate_header(self,
ast: ast_pb2.AST) -> Generator[str, None, None]:
"""Generates pybind11 bindings code from CLIF ast."""
includes = set()
for decl in ast.decls:
includes.add(decl.cpp_file)
self._collect_class_cpp_names(decl)
yield '#include "third_party/pybind11/include/pybind11/smart_holder.h"'
for include in includes:
yield f'#include "{include}"'
yield '\n'
for cpp_name in self._unique_classes:
yield f'PYBIND11_SMART_HOLDER_TYPE_CASTERS({cpp_name})'
yield '\n'
for cpp_name, py_name in self._unique_classes.items():
yield f'// CLIF use `{cpp_name}` as {py_name}'
def generate_from(self, ast: ast_pb2.AST):
"""Generates pybind11 bindings code from CLIF ast.
Args:
ast: CLIF ast protobuf.
Yields:
Generated pybind11 bindings code.
"""
yield from self._generate_headlines()
# Find and keep track of virtual functions.
python_override_class_names = {}
for decl in ast.decls:
yield from self._generate_python_override_class_names(
python_override_class_names, decl)
self._collect_class_cpp_names(decl)
yield from type_casters.generate_from(ast, self._include_paths)
yield f'PYBIND11_MODULE({self._module_name}, m) {{'
yield from self._generate_import_modules(ast)
yield I+('m.doc() = "CLIF-generated pybind11-based module for '
f'{ast.source}";')
yield I + 'py::google::ImportStatusModule();'
for decl in ast.decls:
if decl.decltype == ast_pb2.Decl.Type.FUNC:
for s in function.generate_from('m', decl.func, None):
yield I + s
elif decl.decltype == ast_pb2.Decl.Type.CONST:
yield from self._generate_const_variables(decl.const)
elif decl.decltype == ast_pb2.Decl.Type.CLASS:
yield from classes.generate_from(
decl.class_, 'm',
python_override_class_names.get(decl.class_.name.cpp_name, ''))
elif decl.decltype == ast_pb2.Decl.Type.ENUM:
yield from enums.generate_from('m', decl.enum)
yield ''
yield '}'
def _generate_import_modules(self,
ast: ast_pb2.AST) -> Generator[str, None, None]:
for include in ast.pybind11_includes:
# Converts `full/project/path/cheader_pybind11_clif.h` to
# `full.project.path.cheader_pybind11`
names = include.split('/')
names.insert(0, 'google3')
names[-1] = names[-1][:-len('_clif.h')]
module = '.'.join(names)
yield f'py::module_::import("{module}");'
def _generate_headlines(self):
"""Generates #includes and headers."""
includes = set()
for decl in self._ast.decls:
includes.add(decl.cpp_file)
if decl.decltype == ast_pb2.Decl.Type.CONST:
self._generate_const_variables_headers(decl.const, includes)
for include in self._ast.pybind11_includes:
includes.add(include)
for include in self._ast.usertype_includes:
includes.add(include)
yield '#include "third_party/pybind11/include/pybind11/complex.h"'
yield '#include "third_party/pybind11/include/pybind11/functional.h"'
yield '#include "third_party/pybind11/include/pybind11/operators.h"'
yield '#include "third_party/pybind11/include/pybind11/smart_holder.h"'
yield '// potential future optimization: generate this line only as needed.'
yield '#include "third_party/pybind11/include/pybind11/stl.h"'
yield ''
yield '#include "clif/pybind11/runtime.h"'
yield '#include "clif/pybind11/type_casters.h"'
yield ''
for include in includes:
yield f'#include "{include}"'
yield f'#include "{self._header_path}"'
yield ''
yield 'namespace py = pybind11;'
yield ''
def _generate_const_variables_headers(self, const_decl: ast_pb2.ConstDecl,
includes: Set[str]):
if const_decl.type.lang_type == 'complex':
includes.add('third_party/pybind11/include/pybind11/complex.h')
if (const_decl.type.lang_type.startswith('list<') or
const_decl.type.lang_type.startswith('dict<') or
const_decl.type.lang_type.startswith('set<')):
includes.add('third_party/pybind11/include/pybind11/stl.h')
def _generate_const_variables(self, const_decl: ast_pb2.ConstDecl):
"""Generates variables."""
lang_type = const_decl.type.lang_type
if (lang_type in {'int', 'float', 'double', 'bool', 'str'} or
lang_type.startswith('tuple<')):
const_def = I + (f'm.attr("{const_decl.name.native}") = '
f'{const_decl.name.cpp_name};')
else:
const_def = I + (f'm.attr("{const_decl.name.native}") = '
f'py::cast({const_decl.name.cpp_name});')
yield const_def
def _generate_python_override_class_names(
self, python_override_class_names: Dict[Text, Text], decl: ast_pb2.Decl,
trampoline_name_suffix: str = '_trampoline',
self_life_support: str = 'py::trampoline_self_life_support'):
"""Generates Python overrides classes dictionary for virtual functions."""
if decl.decltype == ast_pb2.Decl.Type.CLASS:
virtual_members = []
for member in decl.class_.members:
if member.decltype == ast_pb2.Decl.Type.FUNC and member.func.virtual:
virtual_members.append(member)
if not virtual_members:
return
python_override_class_name = (
f'{decl.class_.name.native}_{trampoline_name_suffix}')
assert decl.class_.name.cpp_name not in python_override_class_names
python_override_class_names[
decl.class_.name.cpp_name] = python_override_class_name
yield (f'struct {python_override_class_name} : '
f'{decl.class_.name.cpp_name}, {self_life_support} {{')
yield I + (
f'using {decl.class_.name.cpp_name}::{decl.class_.name.native};')
for member in virtual_members:
yield from self._generate_virtual_function(
decl.class_.name.native, member.func)
if python_override_class_name:
yield '};'
def _generate_virtual_function(self,
class_name: str, func_decl: ast_pb2.FuncDecl):
"""Generates virtual function overrides calling Python methods."""
return_type = ''
if func_decl.cpp_void_return:
return_type = 'void'
elif func_decl.returns:
for v in func_decl.returns:
if v.HasField('cpp_exact_type'):
return_type = v.cpp_exact_type
params = ', '.join([f'{p.name.cpp_name}' for p in func_decl.params])
params_list_with_types = []
for p in func_decl.params:
params_list_with_types.append(
f'{function_lib.generate_param_type(p)} {p.name.cpp_name}')
params_str_with_types = ', '.join(params_list_with_types)
cpp_const = ''
if func_decl.cpp_const_method:
cpp_const = ' const'
yield I + (f'{return_type} '
f'{func_decl.name.native}({params_str_with_types}) '
f'{cpp_const} override {{')
if func_decl.is_pure_virtual:
pybind11_override = 'PYBIND11_OVERRIDE_PURE'
else:
pybind11_override = 'PYBIND11_OVERRIDE'
yield I + I + f'{pybind11_override}('
yield I + I + I + f'{return_type},'
yield I + I + I + f'{class_name},'
yield I + I + I + f'{func_decl.name.native},'
yield I + I + I + f'{params}'
yield I + I + ');'
yield I + '}'
def _collect_class_cpp_names(self, decl: ast_pb2.Decl,
parent_name: str = '') -> None:
"""Adds every class name to a set. Only to be used in this context."""
if decl.decltype == ast_pb2.Decl.Type.CLASS:
full_native_name = decl.class_.name.native
if parent_name:
full_native_name = '.'.join([parent_name, decl.class_.name.native])
self._unique_classes[decl.class_.name.cpp_name] = full_native_name
for member in decl.class_.members:
self._collect_class_cpp_names(member, full_native_name)
def write_to(channel, lines):
"""Writes the generated code to files."""
for s in lines:
channel.write(s)
channel.write('\n')
| 38.622951
| 80
| 0.671053
| 1,275
| 9,424
| 4.718431
| 0.174118
| 0.017952
| 0.037899
| 0.033245
| 0.407746
| 0.270279
| 0.237699
| 0.126496
| 0.087766
| 0.043551
| 0
| 0.015397
| 0.214346
| 9,424
| 243
| 81
| 38.781893
| 0.797137
| 0.131367
| 0
| 0.187845
| 0
| 0
| 0.21052
| 0.132609
| 0
| 0
| 0
| 0
| 0.005525
| 1
| 0.060773
| false
| 0
| 0.066298
| 0
| 0.138122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6aea61815f42420b447d1ce164aa7c65f5c5bc94 | size: 3,652 | ext: py | lang: Python
max_stars: repo aglotero/spyder @ 075d32fa359b728416de36cb0e744715fa5e3943, path spyder/dependencies.py, licenses ["MIT"], count 2, events 2019-04-25T08:25:37.000Z to 2019-04-25T08:25:43.000Z
max_issues: repo aglotero/spyder @ 075d32fa359b728416de36cb0e744715fa5e3943, path spyder/dependencies.py, licenses ["MIT"], count 1, events 2020-10-29T19:53:11.000Z to 2020-10-29T19:53:11.000Z
max_forks: repo aglotero/spyder @ 075d32fa359b728416de36cb0e744715fa5e3943, path spyder/dependencies.py, licenses ["MIT"], count 1, events 2019-02-18T01:28:51.000Z to 2019-02-18T01:28:51.000Z
content:
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Module checking Spyder runtime dependencies"""
import os
# Local imports
from spyder.utils import programs
class Dependency(object):
"""Spyder's dependency
version may start with =, >=, > or < to specify the exact requirement;
multiple conditions may be separated by ';' (e.g. '>=0.13;<1.0')"""
OK = 'OK'
NOK = 'NOK'
def __init__(self, modname, features, required_version,
installed_version=None, optional=False):
self.modname = modname
self.features = features
self.required_version = required_version
self.optional = optional
if installed_version is None:
try:
self.installed_version = programs.get_module_version(modname)
except:
# NOTE: Don't add any exception type here!
# Modules can fail to import in several ways besides
# ImportError
self.installed_version = None
else:
self.installed_version = installed_version
def check(self):
"""Check if dependency is installed"""
return programs.is_module_installed(self.modname,
self.required_version,
self.installed_version)
def get_installed_version(self):
"""Return dependency status (string)"""
if self.check():
return '%s (%s)' % (self.installed_version, self.OK)
else:
return '%s (%s)' % (self.installed_version, self.NOK)
def get_status(self):
"""Return dependency status (string)"""
if self.check():
return self.OK
else:
return self.NOK
DEPENDENCIES = []
def add(modname, features, required_version, installed_version=None,
optional=False):
"""Add Spyder dependency"""
global DEPENDENCIES
for dependency in DEPENDENCIES:
if dependency.modname == modname:
raise ValueError("Dependency has already been registered: %s"\
% modname)
DEPENDENCIES += [Dependency(modname, features, required_version,
installed_version, optional)]
def check(modname):
"""Check if required dependency is installed"""
for dependency in DEPENDENCIES:
if dependency.modname == modname:
return dependency.check()
else:
raise RuntimeError("Unknown dependency %s" % modname)
def status(deps=DEPENDENCIES, linesep=os.linesep):
"""Return a status of dependencies"""
maxwidth = 0
col1 = []
col2 = []
for dependency in deps:
title1 = dependency.modname
title1 += ' ' + dependency.required_version
col1.append(title1)
maxwidth = max([maxwidth, len(title1)])
col2.append(dependency.get_installed_version())
text = ""
for index in range(len(deps)):
text += col1[index].ljust(maxwidth) + ': ' + col2[index] + linesep
return text[:-1]
def missing_dependencies():
"""Return the status of missing dependencies (if any)"""
missing_deps = []
for dependency in DEPENDENCIES:
if not dependency.check() and not dependency.optional:
missing_deps.append(dependency)
if missing_deps:
return status(deps=missing_deps, linesep='<br>')
else:
return ""
| 32.035088
| 78
| 0.585706
| 375
| 3,652
| 5.6
| 0.314667
| 0.099048
| 0.057143
| 0.042857
| 0.220476
| 0.206667
| 0.184762
| 0.157143
| 0.106667
| 0
| 0
| 0.007229
| 0.318182
| 3,652
| 113
| 79
| 32.318584
| 0.835743
| 0.194962
| 0
| 0.169014
| 0
| 0
| 0.03255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112676
| false
| 0
| 0.028169
| 0
| 0.309859
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6aea82e968ce364fdac8932cf3b83554a12ac797 | size: 2,947 | ext: py | lang: Python
max_stars: repo jasperhyp/Chemprop4SE @ c02b604b63b6766464db829fea0b306c67302e82, path setup.py, licenses ["MIT"], count 1, events 2021-12-15T05:18:07.000Z to 2021-12-15T05:18:07.000Z
max_issues: repo jasperhyp/chemprop4SE @ c02b604b63b6766464db829fea0b306c67302e82, path setup.py, licenses ["MIT"], count null, events null to null
max_forks: repo jasperhyp/chemprop4SE @ c02b604b63b6766464db829fea0b306c67302e82, path setup.py, licenses ["MIT"], count null, events null to null
content:
import os
from setuptools import find_packages, setup
# Load version number
__version__ = None
src_dir = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(src_dir, 'chemprop', '_version.py')
with open(version_file, encoding='utf-8') as fd:
exec(fd.read())
# Load README
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(
name='chemprop',
version=__version__,
author='Kyle Swanson, Kevin Yang, Wengong Jin, Lior Hirschfeld, Allison Tam',
author_email='[email protected]',
description='Molecular Property Prediction with Message Passing Neural Networks',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/chemprop/chemprop',
download_url=f'https://github.com/chemprop/chemprop/v_{__version__}.tar.gz',
project_urls={
'Documentation': 'https://chemprop.readthedocs.io/en/latest/',
'Source': 'https://github.com/chemprop/chemprop',
'PyPi': 'https://pypi.org/project/chemprop/',
'Demo': 'http://chemprop.csail.mit.edu/',
},
license='MIT',
packages=find_packages(),
package_data={'chemprop': ['py.typed']},
entry_points={
'console_scripts': [
'chemprop_train=chemprop.train:chemprop_train',
'chemprop_predict=chemprop.train:chemprop_predict',
'chemprop_fingerprint=chemprop.train:chemprop_fingerprint',
'chemprop_hyperopt=chemprop.hyperparameter_optimization:chemprop_hyperopt',
'chemprop_interpret=chemprop.interpret:chemprop_interpret',
'chemprop_web=chemprop.web.run:chemprop_web',
'sklearn_train=chemprop.sklearn_train:sklearn_train',
'sklearn_predict=chemprop.sklearn_predict:sklearn_predict',
]
},
install_requires=[
'flask>=1.1.2',
'hyperopt>=0.2.3',
'matplotlib>=3.1.3',
'numpy>=1.18.1',
'pandas>=1.0.3',
'pandas-flavor>=0.2.0',
'scikit-learn>=0.22.2.post1',
'scipy>=1.4.1',
'sphinx>=3.1.2',
'tensorboardX>=2.0',
'torch>=1.5.1',
'tqdm>=4.45.0',
'typed-argument-parser>=1.6.1'
],
extras_require={
'test': [
'pytest>=6.2.2',
'parameterized>=0.8.1'
]
},
python_requires='>=3.6',
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent'
],
keywords=[
'chemistry',
'machine learning',
'property prediction',
'message passing neural network',
'graph neural network'
]
)
| 33.873563
| 88
| 0.599932
| 325
| 2,947
| 5.261538
| 0.452308
| 0.045614
| 0.061404
| 0.060819
| 0.139766
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026782
| 0.25246
| 2,947
| 86
| 89
| 34.267442
| 0.749433
| 0.010519
| 0
| 0.025641
| 0
| 0
| 0.528475
| 0.169084
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.025641
| 0.025641
| 0
| 0.025641
| 0.012821
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6aec0377fc121dfeab883792414df3e21c04a712 | size: 2,335 | ext: py | lang: Python
max_stars: repo HarshCasper/mars @ 4c12c968414d666c7a10f497bc22de90376b1932, path mars/tensor/indexing/slice.py, licenses ["Apache-2.0"], count 2, events 2019-03-29T04:11:10.000Z to 2020-07-08T10:19:54.000Z
max_issues: repo HarshCasper/mars @ 4c12c968414d666c7a10f497bc22de90376b1932, path mars/tensor/indexing/slice.py, licenses ["Apache-2.0"], count null, events null to null
max_forks: repo HarshCasper/mars @ 4c12c968414d666c7a10f497bc22de90376b1932, path mars/tensor/indexing/slice.py, licenses ["Apache-2.0"], count null, events null to null
content:
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import opcodes as OperandDef
from ...serialize import KeyField, ListField
from ..operands import TensorHasInput, TensorOperandMixin
from ..array_utils import get_array_module
from ..core import TensorOrder
class TensorSlice(TensorHasInput, TensorOperandMixin):
_op_type_ = OperandDef.SLICE
_input = KeyField('input')
_slices = ListField('slices')
def __init__(self, slices=None, dtype=None, sparse=False, **kw):
super().__init__(_slices=slices, _dtype=dtype, _sparse=sparse, **kw)
@property
def slices(self):
return self._slices
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = self._inputs[0]
def _get_order(self, kw, i):
order = kw.pop('order', None)
if order is None:
inp = self.input
if inp is None or inp.order == TensorOrder.C_ORDER:
return TensorOrder.C_ORDER
for shape, slc in zip(inp.shape, self._slices):
if slc is None:
continue
s = slc.indices(shape)
if s[0] == 0 and s[1] == shape and s[2] == 1:
continue
else:
return TensorOrder.C_ORDER
return inp.order
return order[i] if isinstance(order, (list, tuple)) else order
@classmethod
def execute(cls, ctx, op):
inp = ctx[op.inputs[0].key]
if op.input.ndim == 0 and not hasattr(inp, 'shape'):
# scalar, but organize it into an array
inp = get_array_module(inp).array(inp)
x = inp[tuple(op.slices)]
out = op.outputs[0]
ctx[out.key] = x.astype(x.dtype, order=out.order.value, copy=False)
| 33.84058
| 76
| 0.636403
| 312
| 2,335
| 4.653846
| 0.435897
| 0.041322
| 0.035124
| 0.022039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012274
| 0.267238
| 2,335
| 68
| 77
| 34.338235
| 0.836353
| 0.259957
| 0
| 0.097561
| 0
| 0
| 0.012259
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121951
| false
| 0
| 0.121951
| 0.02439
| 0.463415
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 6aec42c6af54cc3a34d294f61a827b50bebc2cb6 | size: 50,221 | ext: py | lang: Python
max_stars: repo leewckk/vim.configuration @ db3faa4343714dd3eb3b7ab19f8cd0b64a52ee57, path ftplugin/python/python/pyflakes/pyflakes/checker.py, licenses ["MIT"], count null, events null to null
max_issues: repo leewckk/vim.configuration @ db3faa4343714dd3eb3b7ab19f8cd0b64a52ee57, path ftplugin/python/python/pyflakes/pyflakes/checker.py, licenses ["MIT"], count null, events null to null
max_forks: repo leewckk/vim.configuration @ db3faa4343714dd3eb3b7ab19f8cd0b64a52ee57, path ftplugin/python/python/pyflakes/pyflakes/checker.py, licenses ["MIT"], count null, events null to null
content:
"""
Main module.
Implement the central Checker class.
Also, it models the Bindings and Scopes.
"""
import __future__
import doctest
import os
import sys
PY2 = sys.version_info < (3, 0)
PY32 = sys.version_info < (3, 3) # Python 2.5 to 3.2
PY33 = sys.version_info < (3, 4) # Python 2.5 to 3.3
PY34 = sys.version_info < (3, 5) # Python 2.5 to 3.4
try:
sys.pypy_version_info
PYPY = True
except AttributeError:
PYPY = False
builtin_vars = dir(__import__('__builtin__' if PY2 else 'builtins'))
try:
import ast
except ImportError: # Python 2.5
import _ast as ast
if 'decorator_list' not in ast.ClassDef._fields:
# Patch the missing attribute 'decorator_list'
ast.ClassDef.decorator_list = ()
ast.FunctionDef.decorator_list = property(lambda s: s.decorators)
from pyflakes import messages
if PY2:
def getNodeType(node_class):
# workaround str.upper() which is locale-dependent
return str(unicode(node_class.__name__).upper())
else:
def getNodeType(node_class):
return node_class.__name__.upper()
# Python >= 3.3 uses ast.Try instead of (ast.TryExcept + ast.TryFinally)
if PY32:
def getAlternatives(n):
if isinstance(n, (ast.If, ast.TryFinally)):
return [n.body]
if isinstance(n, ast.TryExcept):
return [n.body + n.orelse] + [[hdl] for hdl in n.handlers]
else:
def getAlternatives(n):
if isinstance(n, ast.If):
return [n.body]
if isinstance(n, ast.Try):
return [n.body + n.orelse] + [[hdl] for hdl in n.handlers]
if PY34:
LOOP_TYPES = (ast.While, ast.For)
else:
LOOP_TYPES = (ast.While, ast.For, ast.AsyncFor)
class _FieldsOrder(dict):
"""Fix order of AST node fields."""
def _get_fields(self, node_class):
# handle iter before target, and generators before element
fields = node_class._fields
if 'iter' in fields:
key_first = 'iter'.find
elif 'generators' in fields:
key_first = 'generators'.find
else:
key_first = 'value'.find
return tuple(sorted(fields, key=key_first, reverse=True))
def __missing__(self, node_class):
self[node_class] = fields = self._get_fields(node_class)
return fields
def counter(items):
"""
Simplest required implementation of collections.Counter. Required as 2.6
does not have Counter in collections.
"""
results = {}
for item in items:
results[item] = results.get(item, 0) + 1
return results
def iter_child_nodes(node, omit=None, _fields_order=_FieldsOrder()):
"""
Yield all direct child nodes of *node*, that is, all fields that
are nodes and all items of fields that are lists of nodes.
"""
for name in _fields_order[node.__class__]:
if name == omit:
continue
field = getattr(node, name, None)
if isinstance(field, ast.AST):
yield field
elif isinstance(field, list):
for item in field:
yield item
def convert_to_value(item):
if isinstance(item, ast.Str):
return item.s
elif hasattr(ast, 'Bytes') and isinstance(item, ast.Bytes):
return item.s
elif isinstance(item, ast.Tuple):
return tuple(convert_to_value(i) for i in item.elts)
elif isinstance(item, ast.Num):
return item.n
elif isinstance(item, ast.Name):
result = VariableKey(item=item)
constants_lookup = {
'True': True,
'False': False,
'None': None,
}
return constants_lookup.get(
result.name,
result,
)
elif (not PY33) and isinstance(item, ast.NameConstant):
# None, True, False are nameconstants in python3, but names in 2
return item.value
else:
return UnhandledKeyType()
class Binding(object):
"""
Represents the binding of a value to a name.
The checker uses this to keep track of which names have been bound and
which names have not. See L{Assignment} for a special type of binding that
is checked with stricter rules.
@ivar used: pair of (L{Scope}, node) indicating the scope and
the node that this binding was last used.
"""
def __init__(self, name, source):
self.name = name
self.source = source
self.used = False
def __str__(self):
return self.name
def __repr__(self):
return '<%s object %r from line %r at 0x%x>' % (self.__class__.__name__,
self.name,
self.source.lineno,
id(self))
def redefines(self, other):
return isinstance(other, Definition) and self.name == other.name
class Definition(Binding):
"""
A binding that defines a function or a class.
"""
class UnhandledKeyType(object):
"""
A dictionary key of a type that we cannot or do not check for duplicates.
"""
class VariableKey(object):
"""
A dictionary key which is a variable.
@ivar item: The variable AST object.
"""
def __init__(self, item):
self.name = item.id
def __eq__(self, compare):
return (
compare.__class__ == self.__class__
and compare.name == self.name
)
def __hash__(self):
return hash(self.name)
class Importation(Definition):
"""
A binding created by an import statement.
@ivar fullName: The complete name given to the import statement,
possibly including multiple dotted components.
@type fullName: C{str}
"""
def __init__(self, name, source, full_name=None):
self.fullName = full_name or name
self.redefined = []
super(Importation, self).__init__(name, source)
def redefines(self, other):
if isinstance(other, SubmoduleImportation):
# See note in SubmoduleImportation about RedefinedWhileUnused
return self.fullName == other.fullName
return isinstance(other, Definition) and self.name == other.name
def _has_alias(self):
"""Return whether importation needs an as clause."""
return not self.fullName.split('.')[-1] == self.name
@property
def source_statement(self):
"""Generate a source statement equivalent to the import."""
if self._has_alias():
return 'import %s as %s' % (self.fullName, self.name)
else:
return 'import %s' % self.fullName
def __str__(self):
"""Return import full name with alias."""
if self._has_alias():
return self.fullName + ' as ' + self.name
else:
return self.fullName
class SubmoduleImportation(Importation):
"""
A binding created by a submodule import statement.
A submodule import is a special case where the root module is implicitly
imported, without an 'as' clause, and the submodule is also imported.
Python does not restrict which attributes of the root module may be used.
This class is only used when the submodule import is without an 'as' clause.
pyflakes handles this case by registering the root module name in the scope,
allowing any attribute of the root module to be accessed.
RedefinedWhileUnused is suppressed in `redefines` unless the submodule
name is also the same, to avoid false positives.
"""
def __init__(self, name, source):
# A dot should only appear in the name when it is a submodule import
assert '.' in name and (not source or isinstance(source, ast.Import))
package_name = name.split('.')[0]
super(SubmoduleImportation, self).__init__(package_name, source)
self.fullName = name
def redefines(self, other):
if isinstance(other, Importation):
return self.fullName == other.fullName
return super(SubmoduleImportation, self).redefines(other)
def __str__(self):
return self.fullName
@property
def source_statement(self):
return 'import ' + self.fullName
class ImportationFrom(Importation):
def __init__(self, name, source, module, real_name=None):
self.module = module
self.real_name = real_name or name
if module.endswith('.'):
full_name = module + self.real_name
else:
full_name = module + '.' + self.real_name
super(ImportationFrom, self).__init__(name, source, full_name)
def __str__(self):
"""Return import full name with alias."""
if self.real_name != self.name:
return self.fullName + ' as ' + self.name
else:
return self.fullName
@property
def source_statement(self):
if self.real_name != self.name:
return 'from %s import %s as %s' % (self.module,
self.real_name,
self.name)
else:
return 'from %s import %s' % (self.module, self.name)
class StarImportation(Importation):
"""A binding created by a 'from x import *' statement."""
def __init__(self, name, source):
super(StarImportation, self).__init__('*', source)
# Each star importation needs a unique name, and
# may not be the module name otherwise it will be deemed imported
self.name = name + '.*'
self.fullName = name
@property
def source_statement(self):
return 'from ' + self.fullName + ' import *'
def __str__(self):
# When the module ends with a ., avoid the ambiguous '..*'
if self.fullName.endswith('.'):
return self.source_statement
else:
return self.name
class FutureImportation(ImportationFrom):
"""
A binding created by a from `__future__` import statement.
`__future__` imports are implicitly used.
"""
def __init__(self, name, source, scope):
super(FutureImportation, self).__init__(name, source, '__future__')
self.used = (scope, source)
class Argument(Binding):
"""
Represents binding a name as an argument.
"""
class Assignment(Binding):
"""
Represents binding a name with an explicit assignment.
The checker will raise warnings for any Assignment that isn't used. Also,
the checker does not consider assignments in tuple/list unpacking to be
Assignments, rather it treats them as simple Bindings.
"""
class FunctionDefinition(Definition):
pass
class ClassDefinition(Definition):
pass
class ExportBinding(Binding):
"""
A binding created by an C{__all__} assignment. If the names in the list
can be determined statically, they will be treated as names for export and
additional checking applied to them.
The only C{__all__} assignment that can be recognized is one which takes
the value of a literal list containing literal strings. For example::
__all__ = ["foo", "bar"]
Names which are imported and not otherwise used but appear in the value of
C{__all__} will not have an unused import warning reported for them.
"""
def __init__(self, name, source, scope):
if '__all__' in scope and isinstance(source, ast.AugAssign):
self.names = list(scope['__all__'].names)
else:
self.names = []
if isinstance(source.value, (ast.List, ast.Tuple)):
for node in source.value.elts:
if isinstance(node, ast.Str):
self.names.append(node.s)
super(ExportBinding, self).__init__(name, source)
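        # Editor's note (illustrative, not part of the original source): with
        # the AugAssign branch above, both statements below are recognized and
        # the resulting binding exports 'foo' as well as 'bar':
        #
        #   __all__ = ['foo']
        #   __all__ += ['bar']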
class Scope(dict):
importStarred = False # set to True when import * is found
def __repr__(self):
scope_cls = self.__class__.__name__
return '<%s at 0x%x %s>' % (scope_cls, id(self), dict.__repr__(self))
class ClassScope(Scope):
pass
class FunctionScope(Scope):
"""
I represent a name scope for a function.
@ivar globals: Names declared 'global' in this function.
"""
usesLocals = False
alwaysUsed = set(['__tracebackhide__',
'__traceback_info__', '__traceback_supplement__'])
def __init__(self):
super(FunctionScope, self).__init__()
# Simplify: manage the special locals as globals
self.globals = self.alwaysUsed.copy()
self.returnValue = None # First non-empty return
self.isGenerator = False # Detect a generator
def unusedAssignments(self):
"""
Return a generator for the assignments which have not been used.
"""
for name, binding in self.items():
if (not binding.used and name not in self.globals
and not self.usesLocals
and isinstance(binding, Assignment)):
yield name, binding
class GeneratorScope(Scope):
pass
class ModuleScope(Scope):
"""Scope for a module."""
_futures_allowed = True
class DoctestScope(ModuleScope):
"""Scope for a doctest."""
# Globally defined names which are not attributes of the builtins module, or
# are only present on some platforms.
_MAGIC_GLOBALS = ['__file__', '__builtins__', 'WindowsError']
def getNodeName(node):
# Returns node.id, or node.name, or None
if hasattr(node, 'id'): # One of the many nodes with an id
return node.id
if hasattr(node, 'name'): # an ExceptHandler node
return node.name
class Checker(object):
"""
I check the cleanliness and sanity of Python code.
@ivar _deferredFunctions: Tracking list used by L{deferFunction}. Elements
of the list are two-tuples. The first element is the callable passed
to L{deferFunction}. The second element is a copy of the scope stack
at the time L{deferFunction} was called.
@ivar _deferredAssignments: Similar to C{_deferredFunctions}, but for
callables which are deferred assignment checks.
"""
nodeDepth = 0
offset = None
traceTree = False
builtIns = set(builtin_vars).union(_MAGIC_GLOBALS)
_customBuiltIns = os.environ.get('PYFLAKES_BUILTINS')
if _customBuiltIns:
builtIns.update(_customBuiltIns.split(','))
del _customBuiltIns
def __init__(self, tree, filename='(none)', builtins=None,
withDoctest='PYFLAKES_DOCTEST' in os.environ):
self._nodeHandlers = {}
self._deferredFunctions = []
self._deferredAssignments = []
self.deadScopes = []
self.messages = []
self.filename = filename
if builtins:
self.builtIns = self.builtIns.union(builtins)
self.withDoctest = withDoctest
self.scopeStack = [ModuleScope()]
self.exceptHandlers = [()]
self.root = tree
self.handleChildren(tree)
self.runDeferred(self._deferredFunctions)
# Set _deferredFunctions to None so that deferFunction will fail
# noisily if called after we've run through the deferred functions.
self._deferredFunctions = None
self.runDeferred(self._deferredAssignments)
# Set _deferredAssignments to None so that deferAssignment will fail
# noisily if called after we've run through the deferred assignments.
self._deferredAssignments = None
del self.scopeStack[1:]
self.popScope()
self.checkDeadScopes()
def deferFunction(self, callable):
"""
Schedule a function handler to be called just before completion.
This is used for handling function bodies, which must be deferred
because code later in the file might modify the global scope. When
`callable` is called, the scope at the time this is called will be
restored, however it will contain any new bindings added to it.
"""
self._deferredFunctions.append((callable, self.scopeStack[:], self.offset))
def deferAssignment(self, callable):
"""
Schedule an assignment handler to be called just after deferred
function handlers.
"""
self._deferredAssignments.append((callable, self.scopeStack[:], self.offset))
def runDeferred(self, deferred):
"""
Run the callables in C{deferred} using their associated scope stack.
"""
for handler, scope, offset in deferred:
self.scopeStack = scope
self.offset = offset
handler()
def _in_doctest(self):
return (len(self.scopeStack) >= 2 and
isinstance(self.scopeStack[1], DoctestScope))
@property
def futuresAllowed(self):
if not all(isinstance(scope, ModuleScope)
for scope in self.scopeStack):
return False
return self.scope._futures_allowed
@futuresAllowed.setter
def futuresAllowed(self, value):
assert value is False
if isinstance(self.scope, ModuleScope):
self.scope._futures_allowed = False
@property
def scope(self):
return self.scopeStack[-1]
def popScope(self):
self.deadScopes.append(self.scopeStack.pop())
def checkDeadScopes(self):
"""
Look at scopes which have been fully examined and report names in them
which were imported but unused.
"""
for scope in self.deadScopes:
# imports in classes are public members
if isinstance(scope, ClassScope):
continue
all_binding = scope.get('__all__')
if all_binding and not isinstance(all_binding, ExportBinding):
all_binding = None
if all_binding:
all_names = set(all_binding.names)
undefined = all_names.difference(scope)
else:
all_names = undefined = []
if undefined:
if not scope.importStarred and \
os.path.basename(self.filename) != '__init__.py':
# Look for possible mistakes in the export list
for name in undefined:
self.report(messages.UndefinedExport,
scope['__all__'].source, name)
# mark all import '*' as used by the undefined in __all__
if scope.importStarred:
for binding in scope.values():
if isinstance(binding, StarImportation):
binding.used = all_binding
# Look for imported names that aren't used.
for value in scope.values():
if isinstance(value, Importation):
used = value.used or value.name in all_names
if not used:
messg = messages.UnusedImport
self.report(messg, value.source, str(value))
for node in value.redefined:
if isinstance(self.getParent(node), ast.For):
messg = messages.ImportShadowedByLoopVar
elif used:
continue
else:
messg = messages.RedefinedWhileUnused
self.report(messg, node, value.name, value.source)
def pushScope(self, scopeClass=FunctionScope):
self.scopeStack.append(scopeClass())
def report(self, messageClass, *args, **kwargs):
self.messages.append(messageClass(self.filename, *args, **kwargs))
def getParent(self, node):
# Lookup the first parent which is not Tuple, List or Starred
while True:
node = node.parent
if not hasattr(node, 'elts') and not hasattr(node, 'ctx'):
return node
def getCommonAncestor(self, lnode, rnode, stop):
if stop in (lnode, rnode) or not (hasattr(lnode, 'parent') and
hasattr(rnode, 'parent')):
return None
if lnode is rnode:
return lnode
if (lnode.depth > rnode.depth):
return self.getCommonAncestor(lnode.parent, rnode, stop)
if (lnode.depth < rnode.depth):
return self.getCommonAncestor(lnode, rnode.parent, stop)
return self.getCommonAncestor(lnode.parent, rnode.parent, stop)
def descendantOf(self, node, ancestors, stop):
for a in ancestors:
if self.getCommonAncestor(node, a, stop):
return True
return False
def differentForks(self, lnode, rnode):
"""True, if lnode and rnode are located on different forks of IF/TRY"""
ancestor = self.getCommonAncestor(lnode, rnode, self.root)
parts = getAlternatives(ancestor)
if parts:
for items in parts:
if self.descendantOf(lnode, items, ancestor) ^ \
self.descendantOf(rnode, items, ancestor):
return True
return False
def addBinding(self, node, value):
"""
Called when a binding is altered.
- `node` is the statement responsible for the change
- `value` is the new value, a Binding instance
"""
# assert value.source in (node, node.parent):
for scope in self.scopeStack[::-1]:
if value.name in scope:
break
existing = scope.get(value.name)
if existing and not self.differentForks(node, existing.source):
parent_stmt = self.getParent(value.source)
if isinstance(existing, Importation) and isinstance(parent_stmt, ast.For):
self.report(messages.ImportShadowedByLoopVar,
node, value.name, existing.source)
elif scope is self.scope:
if (isinstance(parent_stmt, ast.comprehension) and
not isinstance(self.getParent(existing.source),
(ast.For, ast.comprehension))):
self.report(messages.RedefinedInListComp,
node, value.name, existing.source)
elif not existing.used and value.redefines(existing):
self.report(messages.RedefinedWhileUnused,
node, value.name, existing.source)
elif isinstance(existing, Importation) and value.redefines(existing):
existing.redefined.append(node)
if value.name in self.scope:
# then assume the rebound name is used as a global or within a loop
value.used = self.scope[value.name].used
self.scope[value.name] = value
def getNodeHandler(self, node_class):
try:
return self._nodeHandlers[node_class]
except KeyError:
nodeType = getNodeType(node_class)
self._nodeHandlers[node_class] = handler = getattr(self, nodeType)
return handler
def handleNodeLoad(self, node):
name = getNodeName(node)
if not name:
return
in_generators = None
importStarred = None
# try enclosing function scopes and global scope
for scope in self.scopeStack[-1::-1]:
            # only generators used in a class scope can access the names
            # of the class. This is skipped during the first iteration.
if in_generators is False and isinstance(scope, ClassScope):
continue
try:
scope[name].used = (self.scope, node)
except KeyError:
pass
else:
return
importStarred = importStarred or scope.importStarred
if in_generators is not False:
in_generators = isinstance(scope, GeneratorScope)
# look in the built-ins
if name in self.builtIns:
return
if importStarred:
from_list = []
for scope in self.scopeStack[-1::-1]:
for binding in scope.values():
if isinstance(binding, StarImportation):
# mark '*' imports as used for each scope
binding.used = (self.scope, node)
from_list.append(binding.fullName)
# report * usage, with a list of possible sources
from_list = ', '.join(sorted(from_list))
self.report(messages.ImportStarUsage, node, name, from_list)
return
if name == '__path__' and os.path.basename(self.filename) == '__init__.py':
# the special name __path__ is valid only in packages
return
# protected with a NameError handler?
if 'NameError' not in self.exceptHandlers[-1]:
self.report(messages.UndefinedName, node, name)
def handleNodeStore(self, node):
name = getNodeName(node)
if not name:
return
# if the name hasn't already been defined in the current scope
if isinstance(self.scope, FunctionScope) and name not in self.scope:
# for each function or module scope above us
for scope in self.scopeStack[:-1]:
if not isinstance(scope, (FunctionScope, ModuleScope)):
continue
# if the name was defined in that scope, and the name has
# been accessed already in the current scope, and hasn't
# been declared global
used = name in scope and scope[name].used
if used and used[0] is self.scope and name not in self.scope.globals:
# then it's probably a mistake
self.report(messages.UndefinedLocal,
scope[name].used[1], name, scope[name].source)
break
parent_stmt = self.getParent(node)
if isinstance(parent_stmt, (ast.For, ast.comprehension)) or (
parent_stmt != node.parent and
not self.isLiteralTupleUnpacking(parent_stmt)):
binding = Binding(name, node)
elif name == '__all__' and isinstance(self.scope, ModuleScope):
binding = ExportBinding(name, node.parent, self.scope)
else:
binding = Assignment(name, node)
self.addBinding(node, binding)
def handleNodeDelete(self, node):
def on_conditional_branch():
"""
Return `True` if node is part of a conditional body.
"""
current = getattr(node, 'parent', None)
while current:
if isinstance(current, (ast.If, ast.While, ast.IfExp)):
return True
current = getattr(current, 'parent', None)
return False
name = getNodeName(node)
if not name:
return
if on_conditional_branch():
# We cannot predict if this conditional branch is going to
# be executed.
return
if isinstance(self.scope, FunctionScope) and name in self.scope.globals:
self.scope.globals.remove(name)
else:
try:
del self.scope[name]
except KeyError:
self.report(messages.UndefinedName, node, name)
def handleChildren(self, tree, omit=None):
for node in iter_child_nodes(tree, omit=omit):
self.handleNode(node, tree)
def isLiteralTupleUnpacking(self, node):
if isinstance(node, ast.Assign):
for child in node.targets + [node.value]:
if not hasattr(child, 'elts'):
return False
return True
def isDocstring(self, node):
"""
Determine if the given node is a docstring, as long as it is at the
correct place in the node tree.
"""
return isinstance(node, ast.Str) or (isinstance(node, ast.Expr) and
isinstance(node.value, ast.Str))
def getDocstring(self, node):
if isinstance(node, ast.Expr):
node = node.value
if not isinstance(node, ast.Str):
return (None, None)
if PYPY:
doctest_lineno = node.lineno - 1
else:
            # Computed incorrectly if the docstring has backslash continuations
doctest_lineno = node.lineno - node.s.count('\n') - 1
return (node.s, doctest_lineno)
def handleNode(self, node, parent):
if node is None:
return
if self.offset and getattr(node, 'lineno', None) is not None:
node.lineno += self.offset[0]
node.col_offset += self.offset[1]
if self.traceTree:
print(' ' * self.nodeDepth + node.__class__.__name__)
if self.futuresAllowed and not (isinstance(node, ast.ImportFrom) or
self.isDocstring(node)):
self.futuresAllowed = False
self.nodeDepth += 1
node.depth = self.nodeDepth
node.parent = parent
try:
handler = self.getNodeHandler(node.__class__)
handler(node)
finally:
self.nodeDepth -= 1
if self.traceTree:
print(' ' * self.nodeDepth + 'end ' + node.__class__.__name__)
_getDoctestExamples = doctest.DocTestParser().get_examples
def handleDoctests(self, node):
try:
if hasattr(node, 'docstring'):
docstring = node.docstring
# This is just a reasonable guess. In Python 3.7, docstrings no
# longer have line numbers associated with them. This will be
# incorrect if there are empty lines between the beginning
# of the function and the docstring.
node_lineno = node.lineno
if hasattr(node, 'args'):
node_lineno = max([node_lineno] +
[arg.lineno for arg in node.args.args])
else:
(docstring, node_lineno) = self.getDocstring(node.body[0])
examples = docstring and self._getDoctestExamples(docstring)
except (ValueError, IndexError):
# e.g. line 6 of the docstring for <string> has inconsistent
# leading whitespace: ...
return
if not examples:
return
# Place doctest in module scope
saved_stack = self.scopeStack
self.scopeStack = [self.scopeStack[0]]
node_offset = self.offset or (0, 0)
self.pushScope(DoctestScope)
underscore_in_builtins = '_' in self.builtIns
if not underscore_in_builtins:
self.builtIns.add('_')
for example in examples:
try:
tree = compile(example.source, "<doctest>", "exec", ast.PyCF_ONLY_AST)
except SyntaxError:
e = sys.exc_info()[1]
if PYPY:
e.offset += 1
position = (node_lineno + example.lineno + e.lineno,
example.indent + 4 + (e.offset or 0))
self.report(messages.DoctestSyntaxError, node, position)
else:
self.offset = (node_offset[0] + node_lineno + example.lineno,
node_offset[1] + example.indent + 4)
self.handleChildren(tree)
self.offset = node_offset
if not underscore_in_builtins:
self.builtIns.remove('_')
self.popScope()
self.scopeStack = saved_stack
def ignore(self, node):
pass
# "stmt" type nodes
DELETE = PRINT = FOR = ASYNCFOR = WHILE = IF = WITH = WITHITEM = \
ASYNCWITH = ASYNCWITHITEM = RAISE = TRYFINALLY = EXEC = \
EXPR = ASSIGN = handleChildren
PASS = ignore
# "expr" type nodes
BOOLOP = BINOP = UNARYOP = IFEXP = SET = \
COMPARE = CALL = REPR = ATTRIBUTE = SUBSCRIPT = \
STARRED = NAMECONSTANT = handleChildren
NUM = STR = BYTES = ELLIPSIS = ignore
# "slice" type nodes
SLICE = EXTSLICE = INDEX = handleChildren
# expression contexts are node instances too, though being constants
LOAD = STORE = DEL = AUGLOAD = AUGSTORE = PARAM = ignore
# same for operators
AND = OR = ADD = SUB = MULT = DIV = MOD = POW = LSHIFT = RSHIFT = \
BITOR = BITXOR = BITAND = FLOORDIV = INVERT = NOT = UADD = USUB = \
EQ = NOTEQ = LT = LTE = GT = GTE = IS = ISNOT = IN = NOTIN = \
MATMULT = ignore
# additional node types
COMPREHENSION = KEYWORD = FORMATTEDVALUE = JOINEDSTR = handleChildren
def DICT(self, node):
        # Complain if there are duplicate keys with different values.
        # If they have the same value, it's not going to cause potentially
        # unexpected behaviour, so we'll not complain.
keys = [
convert_to_value(key) for key in node.keys
]
key_counts = counter(keys)
duplicate_keys = [
key for key, count in key_counts.items()
if count > 1
]
for key in duplicate_keys:
key_indices = [i for i, i_key in enumerate(keys) if i_key == key]
values = counter(
convert_to_value(node.values[index])
for index in key_indices
)
if any(count == 1 for value, count in values.items()):
for key_index in key_indices:
key_node = node.keys[key_index]
if isinstance(key, VariableKey):
self.report(messages.MultiValueRepeatedKeyVariable,
key_node,
key.name)
else:
self.report(
messages.MultiValueRepeatedKeyLiteral,
key_node,
key,
)
self.handleChildren(node)
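        # Editor's note (illustrative, not part of the original source): with
        # the checks above, a literal like {'x': 1, 'x': 2} is reported via
        # MultiValueRepeatedKeyLiteral because the repeated key maps to two
        # different values, while {'x': 1, 'x': 1} is not reported, since every
        # occurrence of the key carries the same value.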
def ASSERT(self, node):
if isinstance(node.test, ast.Tuple) and node.test.elts != []:
self.report(messages.AssertTuple, node)
self.handleChildren(node)
def GLOBAL(self, node):
"""
Keep track of globals declarations.
"""
global_scope_index = 1 if self._in_doctest() else 0
global_scope = self.scopeStack[global_scope_index]
# Ignore 'global' statement in global scope.
if self.scope is not global_scope:
# One 'global' statement can bind multiple (comma-delimited) names.
for node_name in node.names:
node_value = Assignment(node_name, node)
# Remove UndefinedName messages already reported for this name.
# TODO: if the global is not used in this scope, it does not
# become a globally defined name. See test_unused_global.
self.messages = [
m for m in self.messages if not
isinstance(m, messages.UndefinedName) or
m.message_args[0] != node_name]
# Bind name to global scope if it doesn't exist already.
global_scope.setdefault(node_name, node_value)
# Bind name to non-global scopes, but as already "used".
node_value.used = (global_scope, node)
for scope in self.scopeStack[global_scope_index + 1:]:
scope[node_name] = node_value
NONLOCAL = GLOBAL
def GENERATOREXP(self, node):
self.pushScope(GeneratorScope)
self.handleChildren(node)
self.popScope()
LISTCOMP = handleChildren if PY2 else GENERATOREXP
DICTCOMP = SETCOMP = GENERATOREXP
def NAME(self, node):
"""
Handle occurrence of Name (which can be a load/store/delete access.)
"""
# Locate the name in locals / function / globals scopes.
if isinstance(node.ctx, (ast.Load, ast.AugLoad)):
self.handleNodeLoad(node)
if (node.id == 'locals' and isinstance(self.scope, FunctionScope)
and isinstance(node.parent, ast.Call)):
# we are doing locals() call in current scope
self.scope.usesLocals = True
elif isinstance(node.ctx, (ast.Store, ast.AugStore)):
self.handleNodeStore(node)
elif isinstance(node.ctx, ast.Del):
self.handleNodeDelete(node)
else:
# must be a Param context -- this only happens for names in function
# arguments, but these aren't dispatched through here
raise RuntimeError("Got impossible expression context: %r" % (node.ctx,))
def CONTINUE(self, node):
        # Walk the tree up until we see a loop (OK), a function or class
        # definition (not OK), a finally block (not OK, specifically for
        # 'continue'), or the top module scope (not OK)
n = node
while hasattr(n, 'parent'):
n, n_child = n.parent, n
if isinstance(n, LOOP_TYPES):
# Doesn't apply unless it's in the loop itself
if n_child not in n.orelse:
return
if isinstance(n, (ast.FunctionDef, ast.ClassDef)):
break
# Handle Try/TryFinally difference in Python < and >= 3.3
if hasattr(n, 'finalbody') and isinstance(node, ast.Continue):
if n_child in n.finalbody:
self.report(messages.ContinueInFinally, node)
return
if isinstance(node, ast.Continue):
self.report(messages.ContinueOutsideLoop, node)
else: # ast.Break
self.report(messages.BreakOutsideLoop, node)
BREAK = CONTINUE
def RETURN(self, node):
if isinstance(self.scope, (ClassScope, ModuleScope)):
self.report(messages.ReturnOutsideFunction, node)
return
if (
node.value and
hasattr(self.scope, 'returnValue') and
not self.scope.returnValue
):
self.scope.returnValue = node.value
self.handleNode(node.value, node)
def YIELD(self, node):
if isinstance(self.scope, (ClassScope, ModuleScope)):
self.report(messages.YieldOutsideFunction, node)
return
self.scope.isGenerator = True
self.handleNode(node.value, node)
AWAIT = YIELDFROM = YIELD
def FUNCTIONDEF(self, node):
for deco in node.decorator_list:
self.handleNode(deco, node)
self.LAMBDA(node)
self.addBinding(node, FunctionDefinition(node.name, node))
# doctest does not process doctest within a doctest,
# or in nested functions.
if (self.withDoctest and
not self._in_doctest() and
not isinstance(self.scope, FunctionScope)):
self.deferFunction(lambda: self.handleDoctests(node))
ASYNCFUNCTIONDEF = FUNCTIONDEF
def LAMBDA(self, node):
args = []
annotations = []
if PY2:
def addArgs(arglist):
for arg in arglist:
if isinstance(arg, ast.Tuple):
addArgs(arg.elts)
else:
args.append(arg.id)
addArgs(node.args.args)
defaults = node.args.defaults
else:
for arg in node.args.args + node.args.kwonlyargs:
args.append(arg.arg)
annotations.append(arg.annotation)
defaults = node.args.defaults + node.args.kw_defaults
# Only for Python3 FunctionDefs
is_py3_func = hasattr(node, 'returns')
for arg_name in ('vararg', 'kwarg'):
wildcard = getattr(node.args, arg_name)
if not wildcard:
continue
args.append(wildcard if PY33 else wildcard.arg)
if is_py3_func:
if PY33: # Python 2.5 to 3.3
argannotation = arg_name + 'annotation'
annotations.append(getattr(node.args, argannotation))
else: # Python >= 3.4
annotations.append(wildcard.annotation)
if is_py3_func:
annotations.append(node.returns)
if len(set(args)) < len(args):
for (idx, arg) in enumerate(args):
if arg in args[:idx]:
self.report(messages.DuplicateArgument, node, arg)
for child in annotations + defaults:
if child:
self.handleNode(child, node)
def runFunction():
self.pushScope()
for name in args:
self.addBinding(node, Argument(name, node))
if isinstance(node.body, list):
# case for FunctionDefs
for stmt in node.body:
self.handleNode(stmt, node)
else:
# case for Lambdas
self.handleNode(node.body, node)
def checkUnusedAssignments():
"""
Check to see if any assignments have not been used.
"""
for name, binding in self.scope.unusedAssignments():
self.report(messages.UnusedVariable, binding.source, name)
self.deferAssignment(checkUnusedAssignments)
if PY32:
def checkReturnWithArgumentInsideGenerator():
"""
Check to see if there is any return statement with
arguments but the function is a generator.
"""
if self.scope.isGenerator and self.scope.returnValue:
self.report(messages.ReturnWithArgsInsideGenerator,
self.scope.returnValue)
self.deferAssignment(checkReturnWithArgumentInsideGenerator)
self.popScope()
self.deferFunction(runFunction)
def CLASSDEF(self, node):
"""
Check names used in a class definition, including its decorators, base
classes, and the body of its definition. Additionally, add its name to
the current scope.
"""
for deco in node.decorator_list:
self.handleNode(deco, node)
for baseNode in node.bases:
self.handleNode(baseNode, node)
if not PY2:
for keywordNode in node.keywords:
self.handleNode(keywordNode, node)
self.pushScope(ClassScope)
        # doctest does not process doctests within a doctest;
        # classes within classes are processed.
if (self.withDoctest and
not self._in_doctest() and
not isinstance(self.scope, FunctionScope)):
self.deferFunction(lambda: self.handleDoctests(node))
for stmt in node.body:
self.handleNode(stmt, node)
self.popScope()
self.addBinding(node, ClassDefinition(node.name, node))
def AUGASSIGN(self, node):
self.handleNodeLoad(node.target)
self.handleNode(node.value, node)
self.handleNode(node.target, node)
def TUPLE(self, node):
if not PY2 and isinstance(node.ctx, ast.Store):
# Python 3 advanced tuple unpacking: a, *b, c = d.
# Only one starred expression is allowed, and no more than 1<<8
            # assignments are allowed before a starred expression. There is
# also a limit of 1<<24 expressions after the starred expression,
# which is impossible to test due to memory restrictions, but we
# add it here anyway
has_starred = False
star_loc = -1
for i, n in enumerate(node.elts):
if isinstance(n, ast.Starred):
if has_starred:
self.report(messages.TwoStarredExpressions, node)
# The SyntaxError doesn't distinguish two from more
# than two.
break
has_starred = True
star_loc = i
if star_loc >= 1 << 8 or len(node.elts) - star_loc - 1 >= 1 << 24:
self.report(messages.TooManyExpressionsInStarredAssignment, node)
self.handleChildren(node)
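        # Editor's note (illustrative, not part of the original source): these
        # limits mirror CPython's own rules -- `a, *b, c = seq` is accepted, a
        # second star as in `a, *b, *c = seq` is reported, and 256 or more
        # targets before the starred name trigger
        # TooManyExpressionsInStarredAssignment.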
LIST = TUPLE
def IMPORT(self, node):
for alias in node.names:
if '.' in alias.name and not alias.asname:
importation = SubmoduleImportation(alias.name, node)
else:
name = alias.asname or alias.name
importation = Importation(name, node, alias.name)
self.addBinding(node, importation)
def IMPORTFROM(self, node):
if node.module == '__future__':
if not self.futuresAllowed:
self.report(messages.LateFutureImport,
node, [n.name for n in node.names])
else:
self.futuresAllowed = False
module = ('.' * node.level) + (node.module or '')
for alias in node.names:
name = alias.asname or alias.name
if node.module == '__future__':
importation = FutureImportation(name, node, self.scope)
if alias.name not in __future__.all_feature_names:
self.report(messages.FutureFeatureNotDefined,
node, alias.name)
elif alias.name == '*':
                # In Python 2 only, a local 'import *' is just a SyntaxWarning
if not PY2 and not isinstance(self.scope, ModuleScope):
self.report(messages.ImportStarNotPermitted,
node, module)
continue
self.scope.importStarred = True
self.report(messages.ImportStarUsed, node, module)
importation = StarImportation(module, node)
else:
importation = ImportationFrom(name, node,
module, alias.name)
self.addBinding(node, importation)
def TRY(self, node):
handler_names = []
# List the exception handlers
for i, handler in enumerate(node.handlers):
if isinstance(handler.type, ast.Tuple):
for exc_type in handler.type.elts:
handler_names.append(getNodeName(exc_type))
elif handler.type:
handler_names.append(getNodeName(handler.type))
if handler.type is None and i < len(node.handlers) - 1:
self.report(messages.DefaultExceptNotLast, handler)
# Memorize the except handlers and process the body
self.exceptHandlers.append(handler_names)
for child in node.body:
self.handleNode(child, node)
self.exceptHandlers.pop()
# Process the other nodes: "except:", "else:", "finally:"
self.handleChildren(node, omit='body')
TRYEXCEPT = TRY
def EXCEPTHANDLER(self, node):
if PY2 or node.name is None:
self.handleChildren(node)
return
# 3.x: the name of the exception, which is not a Name node, but
# a simple string, creates a local that is only bound within the scope
# of the except: block.
for scope in self.scopeStack[::-1]:
if node.name in scope:
is_name_previously_defined = True
break
else:
is_name_previously_defined = False
self.handleNodeStore(node)
self.handleChildren(node)
if not is_name_previously_defined:
# See discussion on https://github.com/PyCQA/pyflakes/pull/59
# We're removing the local name since it's being unbound
# after leaving the except: block and it's always unbound
# if the except: block is never entered. This will cause an
# "undefined name" error raised if the checked code tries to
# use the name afterwards.
#
# Unless it's been removed already. Then do nothing.
try:
del self.scope[node.name]
except KeyError:
pass
def ANNASSIGN(self, node):
if node.value:
# Only bind the *targets* if the assignment has a value.
# Otherwise it's not really ast.Store and shouldn't silence
# UndefinedLocal warnings.
self.handleNode(node.target, node)
self.handleNode(node.annotation, node)
if node.value:
# If the assignment has value, handle the *value* now.
self.handleNode(node.value, node)
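# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original checker module): a
# minimal way to drive the Checker defined above.  The source text and the
# 'demo.py' filename are made-up examples; real callers supply their own.
def _pyflakes_demo():
    # Parse some source into an AST, run the Checker over it, and print the
    # collected messages in line order.
    tree = ast.parse("import os\n", filename='demo.py')
    checker = Checker(tree, filename='demo.py')
    for message in sorted(checker.messages, key=lambda m: m.lineno):
        print(message)  # e.g. something like: demo.py:1: 'os' imported but unused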
| 36.900073
| 87
| 0.567054
| 5,448
| 50,221
| 5.127019
| 0.14464
| 0.012566
| 0.017399
| 0.00401
| 0.157525
| 0.117571
| 0.090255
| 0.062366
| 0.05005
| 0.030145
| 0
| 0.004061
| 0.352821
| 50,221
| 1,360
| 88
| 36.927206
| 0.855332
| 0.2036
| 0
| 0.260369
| 0
| 0
| 0.017609
| 0.000637
| 0
| 0
| 0
| 0.000735
| 0.004608
| 1
| 0.101382
| false
| 0.009217
| 0.06682
| 0.013825
| 0.395161
| 0.002304
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aed26d63f42531533566c9bcedcbe6f5289c5e4
| 3,349
|
py
|
Python
|
AutoScreenShot.py
|
infinyte7/Auto-Screenshot
|
5d8e39af61f3361f372ffb48add53171b7cea672
|
[
"MIT"
] | 3
|
2020-10-29T13:57:15.000Z
|
2021-02-19T21:59:15.000Z
|
AutoScreenShot.py
|
infinyte7/Auto-Screenshot
|
5d8e39af61f3361f372ffb48add53171b7cea672
|
[
"MIT"
] | null | null | null |
AutoScreenShot.py
|
infinyte7/Auto-Screenshot
|
5d8e39af61f3361f372ffb48add53171b7cea672
|
[
"MIT"
] | 1
|
2021-02-19T21:59:48.000Z
|
2021-02-19T21:59:48.000Z
|
# Project Name: Auto Screenshot
# Description: Take a screenshot of the screen whenever any change takes place.
# Author: Mani (Infinyte7)
# Date: 26-10-2020
# License: MIT
from pyscreenshot import grab
from PIL import ImageChops
import os
import time
import subprocess, sys
from datetime import datetime
import tkinter as tk
from tkinter import *
from tkinter import font
class AutoScreenshot:
def __init__(self, master):
self.root = root
root.title('Auto Screenshot')
root.config(bg="white")
fontRoboto = font.Font(family='Roboto', size=16, weight='bold')
# project name label
projectTitleLabel = Label(root, text="Auto Screenshot v1.0.0")
projectTitleLabel.config(font=fontRoboto, bg="white", fg="#5599ff")
projectTitleLabel.pack(padx="10")
# start button
btn_start = Button(root, text="Start", command=self.start)
btn_start.config(highlightthickness=0, bd=0, fg="white", bg="#5fd38d",
activebackground="#5fd38d", activeforeground="white", font=fontRoboto)
btn_start.pack(padx="10", fill=BOTH)
# close button
btn_start = Button(root, text="Close", command=self.close)
btn_start.config(highlightthickness=0, bd=0, fg="white", bg="#f44336",
activebackground="#ff7043", activeforeground="white", font=fontRoboto)
btn_start.pack(padx="10", pady="10", fill=BOTH)
def start(self):
# Create folder to store images
directory = "Screenshots"
self.new_folder = directory + "/" + datetime.now().strftime("%Y_%m_%d-%I_%M_%p")
        # parent "Screenshots" folder that holds all captured images
if not os.path.exists(directory):
os.makedirs(directory)
# new folder for storing images for current session
if not os.path.exists(self.new_folder):
os.makedirs(self.new_folder)
        # Run GetScreenCoordinates.py and get the coordinates
cords_point = subprocess.check_output([sys.executable, "GetScreenCoordinates.py", "-l"])
cord_tuple = tuple(cords_point.decode("utf-8").rstrip().split(","))
        # coordinates used for taking screenshots and comparing them
self.cords = (int(cord_tuple[0]), int(cord_tuple[1]), int(cord_tuple[2]), int(cord_tuple[3]))
# save first image
img1 = grab(bbox=self.cords)
now = datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p")
fname = self.new_folder + "/ScreenShots" + now + ".png"
img1.save(fname)
print("First Screenshot taken")
        # start taking screenshots of subsequent changes
self.take_screenshots()
def take_screenshots(self):
# grab first and second image
img1 = grab(bbox=self.cords)
time.sleep(1)
img2 = grab(bbox=self.cords)
# check difference between images
diff = ImageChops.difference(img1, img2)
bbox = diff.getbbox()
if bbox is not None:
now = datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p")
fname = self.new_folder + "/ScreenShots" + now + ".png"
img2.save(fname)
print("Screenshot taken")
root.after(5, self.take_screenshots)
def close(self):
quit()
if __name__ == "__main__":
root = Tk()
gui = AutoScreenshot(root)
root.mainloop()
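# Editor's illustrative sketch (not part of the original script): the change
# detection used in take_screenshots() reduces to a pixel-wise comparison of
# two PIL images.  The helper name below is hypothetical.
def _images_differ(img_a, img_b):
    # ImageChops.difference is computed pixel by pixel; getbbox() returns None
    # only when the two images are identical, i.e. nothing on screen changed.
    return ImageChops.difference(img_a, img_b).getbbox() is not None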
| 32.833333
| 101
| 0.616602
| 407
| 3,349
| 4.955774
| 0.383292
| 0.023798
| 0.032226
| 0.029747
| 0.242935
| 0.226078
| 0.172533
| 0.172533
| 0.160635
| 0.108081
| 0
| 0.023829
| 0.260675
| 3,349
| 101
| 102
| 33.158416
| 0.790792
| 0.149
| 0
| 0.1
| 0
| 0
| 0.108757
| 0.008121
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.15
| 0
| 0.233333
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aed847e420c882fffa9edfe88238102ee06ac09
| 2,749
|
py
|
Python
|
rqalpha/utils/logger.py
|
HaidongHe/rqalpha
|
bb824178425909e051c456f6062a6c5bdc816421
|
[
"Apache-2.0"
] | 1
|
2020-11-10T05:44:39.000Z
|
2020-11-10T05:44:39.000Z
|
rqalpha/utils/logger.py
|
HaidongHe/rqalpha
|
bb824178425909e051c456f6062a6c5bdc816421
|
[
"Apache-2.0"
] | null | null | null |
rqalpha/utils/logger.py
|
HaidongHe/rqalpha
|
bb824178425909e051c456f6062a6c5bdc816421
|
[
"Apache-2.0"
] | 1
|
2020-03-05T05:06:45.000Z
|
2020-03-05T05:06:45.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2019 Shenzhen Ricequant Technology Co., Ltd. ("Ricequant")
#
# This software may not be used except in compliance with the current license.
#
# * Non-commercial use (i.e. individuals using the software for non-commercial purposes, or non-profit institutions such as universities and research institutes using it for education, research and similar purposes):
#     The Apache License 2.0 (the "Apache 2.0 License") applies; a copy of the Apache 2.0 License is available at http://www.apache.org/licenses/LICENSE-2.0.
#     Unless required by law or agreed to in writing, the software must be distributed under the current license "as is", with no additional conditions attached.
#
# * Commercial use (i.e. individuals using the software for any commercial purpose, or legal persons or other organizations using it for any purpose):
#     Without authorization from Ricequant, no individual may use this software for any commercial purpose (including but not limited to providing, selling, renting, lending or transferring to third parties the software, derivative works of the software, or products or services that reference or draw on its functionality or source code), and no legal person or other organization may use it for any purpose; otherwise Ricequant is entitled to pursue liability for the corresponding intellectual property infringement.
#     Subject to the above, use of the software must also comply with the Apache 2.0 License; where this license conflicts with the Apache 2.0 License, this license prevails.
#     For the detailed licensing process, please contact [email protected].
from datetime import datetime
import logbook
from logbook import Logger, StderrHandler
from rqalpha.utils.py2 import to_utf8
logbook.set_datetime_format("local")
# patch warn: render logbook's WARNING level name as 'WARN'
logbook.base._level_names[logbook.base.WARNING] = 'WARN'
__all__ = [
"user_log",
"system_log",
"user_system_log",
]
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
def user_std_handler_log_formatter(record, handler):
from rqalpha.environment import Environment
try:
dt = Environment.get_instance().calendar_dt.strftime(DATETIME_FORMAT)
except Exception:
dt = datetime.now().strftime(DATETIME_FORMAT)
log = "{dt} {level} {msg}".format(
dt=dt,
level=record.level_name,
msg=to_utf8(record.message),
)
return log
user_std_handler = StderrHandler(bubble=True)
user_std_handler.formatter = user_std_handler_log_formatter
def formatter_builder(tag):
def formatter(record, handler):
log = "[{formatter_tag}] [{time}] {level}: {msg}".format(
formatter_tag=tag,
level=record.level_name,
msg=to_utf8(record.message),
time=record.time,
)
if record.formatted_exception:
log += "\n" + record.formatted_exception
return log
return formatter
# loggers
# Logger for user-code log output
user_log = Logger("user_log")
# System log shown to the user
user_system_log = Logger("user_system_log")
# Detailed logging of user exceptions
user_detail_log = Logger("user_detail_log")
# user_detail_log.handlers.append(StderrHandler(bubble=True))
# System log
system_log = Logger("system_log")
basic_system_log = Logger("basic_system_log")
# Standard output log
std_log = Logger("std_log")
def init_logger():
system_log.handlers = [StderrHandler(bubble=True)]
basic_system_log.handlers = [StderrHandler(bubble=True)]
std_log.handlers = [StderrHandler(bubble=True)]
user_log.handlers = []
user_system_log.handlers = []
def user_print(*args, **kwargs):
sep = kwargs.get("sep", " ")
end = kwargs.get("end", "")
message = sep.join(map(str, args)) + end
user_log.info(message)
init_logger()
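# Editor's illustrative sketch (not part of the original module): one way to
# wire up the handlers and loggers defined above.  The log messages are
# made-up examples.
def _logger_demo():
    init_logger()
    # route user-facing output through the formatter defined above
    user_log.handlers = [user_std_handler]
    user_log.info("strategy started")
    system_log.warn("running without a data bundle")
    # user_print() joins its arguments and forwards them to user_log.info
    user_print("hello", "world", sep=", ")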
| 25.220183
| 144
| 0.694434
| 336
| 2,749
| 5.473214
| 0.395833
| 0.053834
| 0.062534
| 0.04894
| 0.135943
| 0.089179
| 0.045677
| 0.045677
| 0.045677
| 0
| 0
| 0.009329
| 0.181157
| 2,749
| 108
| 145
| 25.453704
| 0.807641
| 0.276464
| 0
| 0.107143
| 0
| 0
| 0.102186
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089286
| false
| 0
| 0.089286
| 0
| 0.232143
| 0.017857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aee73a3b8946a07512f9eca678734d10d671560
| 5,517
|
py
|
Python
|
salt/modules/oracle.py
|
wikimedia/operations-debs-salt
|
be6342abc7401ff92f67ed59f7834f1359f35314
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/oracle.py
|
wikimedia/operations-debs-salt
|
be6342abc7401ff92f67ed59f7834f1359f35314
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/oracle.py
|
wikimedia/operations-debs-salt
|
be6342abc7401ff92f67ed59f7834f1359f35314
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Oracle DataBase connection module
:maintainer: Vladimir Bormotov <[email protected]>
:maturity: new
:depends: cx_Oracle
:platform: all
:configuration: module provide connections for multiple Oracle DB instances.
**OS Environment**
.. code-block:: text
ORACLE_HOME: path to oracle product
PATH: path to Oracle Client libs need to be in PATH
**pillar**
.. code-block:: text
        oracle.dbs: list of known databases
oracle.dbs.<db>.uri: connection credentials in format:
user/password@host[:port]/sid[ as {sysdba|sysoper}]
'''
import os
import logging
from salt.utils.decorators import depends
log = logging.getLogger(__name__)
try:
import cx_Oracle
MODE = {
'sysdba': cx_Oracle.SYSDBA,
'sysoper': cx_Oracle.SYSOPER
}
HAS_CX_ORACLE = True
except ImportError:
MODE = {'sysdba': 2, 'sysoper': 4}
HAS_CX_ORACLE = False
__virtualname__ = 'oracle'
def __virtual__():
'''
    Load module only if cx_Oracle is installed
'''
return __virtualname__ if HAS_CX_ORACLE else False
def _cx_oracle_req():
'''
Fallback function stub
'''
    return 'Need "cx_Oracle" and Oracle Client installed for this function to exist'
def _unicode_output(cursor, name, default_type, size, precision, scale):
'''
    Return string values as Python unicode strings
http://www.oracle.com/technetwork/articles/dsl/tuininga-cx-oracle-084866.html
'''
if default_type in (cx_Oracle.STRING, cx_Oracle.LONG_STRING,
cx_Oracle.FIXED_CHAR, cx_Oracle.CLOB):
return cursor.var(unicode, size, cursor.arraysize)
def _connect(uri):
'''
uri = user/password@host[:port]/sid[ as {sysdba|sysoper}]
Return cx_Oracle.Connection instance
'''
    # cx_Oracle.Connection() does not support the 'as sysdba' syntax
uri_l = uri.rsplit(' as ', 1)
if len(uri_l) == 2:
credentials, mode = uri_l
mode = MODE[mode]
else:
credentials = uri_l[0]
mode = 0
userpass, hostportsid = credentials.split('@')
user, password = userpass.split('/')
hostport, sid = hostportsid.split('/')
hostport_l = hostport.split(':')
if len(hostport_l) == 2:
host, port = hostport_l
else:
host = hostport_l[0]
port = 1521
log.debug('connect: {0}'.format((user, password, host, port, sid, mode)))
# force UTF-8 client encoding
os.environ['NLS_LANG'] = '.AL32UTF8'
conn = cx_Oracle.connect(user, password,
cx_Oracle.makedsn(host, port, sid),
mode)
conn.outputtypehandler = _unicode_output
return conn
@depends('cx_Oracle', fallback_function=_cx_oracle_req)
def run_query(db, query):
'''
Run SQL query and return result
CLI example:
.. code-block:: bash
salt '*' oracle.run_query my_db "select * from my_table"
'''
log.debug('run query on {0}: {1}'.format(db, query))
conn = _connect(show_dbs(db)[db]['uri'])
return conn.cursor().execute(query).fetchall()
def show_dbs(*dbs):
'''
    Show database configuration from pillar, filtered by the given args
.. code-block:: bash
salt '*' oracle.show_dbs
salt '*' oracle.show_dbs my_db
'''
if dbs:
log.debug('get dbs from pillar: {0}'.format(dbs))
result = {}
for db in dbs:
result[db] = __salt__['pillar.get']('oracle:dbs:' + db)
return result
else:
pillar_dbs = __salt__['pillar.get']('oracle:dbs')
log.debug('get all ({0}) dbs from pillar'.format(len(pillar_dbs)))
return pillar_dbs
@depends('cx_Oracle', fallback_function=_cx_oracle_req)
def version(*dbs):
'''
Server Version (select banner from v$version)
CLI Example:
.. code-block:: bash
salt '*' oracle.version
salt '*' oracle.version my_db
'''
pillar_dbs = __salt__['pillar.get']('oracle:dbs')
get_version = lambda x: [
r[0] for r in run_query(x, "select banner from v$version order by banner")
]
result = {}
if dbs:
log.debug('get db versions for: {0}'.format(dbs))
for db in dbs:
if db in pillar_dbs:
result[db] = get_version(db)
else:
log.debug('get all({0}) dbs versions'.format(len(dbs)))
for db in dbs:
result[db] = get_version(db)
return result
@depends('cx_Oracle', fallback_function=_cx_oracle_req)
def client_version():
'''
Oracle Client Version
CLI Example:
.. code-block:: bash
salt '*' oracle.client_version
'''
return '.'.join((str(x) for x in cx_Oracle.clientversion()))
def show_pillar(item=None):
'''
    Show the oracle.* Pillar segment; subitems use the "item:subitem" notation
CLI Example:
.. code-block:: bash
salt '*' oracle.show_pillar
salt '*' oracle.show_pillar dbs:my_db
'''
if item:
return __salt__['pillar.get']('oracle:' + item)
else:
return __salt__['pillar.get']('oracle')
def show_env():
'''
Show Environment used by Oracle Client
CLI Example:
.. code-block:: bash
salt '*' oracle.show_env
.. note::
        on the first _connect() call, ``NLS_LANG`` will be forced to '.AL32UTF8'
'''
envs = ['PATH', 'ORACLE_HOME', 'TNS_ADMIN', 'NLS_LANG']
result = {}
for env in envs:
if env in os.environ:
result[env] = os.environ[env]
return result
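# Editor's illustrative sketch (not part of the original module): one plausible
# pillar layout and the way _connect() above takes the URI apart.  The
# credentials and host below are made up.
#
#   oracle:
#     dbs:
#       my_db:
#         uri: scott/tiger@db1.example.com:1521/orcl as sysdba
#
# _connect('scott/tiger@db1.example.com:1521/orcl as sysdba') splits this into
# user='scott', password='tiger', host='db1.example.com', port='1521',
# sid='orcl' and mode=MODE['sysdba'] before calling cx_Oracle.connect().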
| 24.52
| 82
| 0.610839
| 701
| 5,517
| 4.626248
| 0.269615
| 0.064138
| 0.024052
| 0.031452
| 0.243602
| 0.193956
| 0.141844
| 0.11255
| 0.041628
| 0
| 0
| 0.008354
| 0.26228
| 5,517
| 224
| 83
| 24.629464
| 0.788452
| 0.334783
| 0
| 0.242105
| 0
| 0
| 0.134739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0.042105
| 0.052632
| 0
| 0.284211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aef1e728fe8745d27da0badcde01e88381bd9b3
| 32,785
|
py
|
Python
|
tests/test_std.py
|
ashwini-balnaves/python-consul
|
4ddec9b57eb5284b58967ce1a9b2422519f88cc2
|
[
"MIT"
] | 469
|
2015-01-02T18:36:39.000Z
|
2022-03-10T09:18:13.000Z
|
tests/test_std.py
|
ashwini-balnaves/python-consul
|
4ddec9b57eb5284b58967ce1a9b2422519f88cc2
|
[
"MIT"
] | 249
|
2015-01-21T19:06:34.000Z
|
2022-01-12T09:12:58.000Z
|
tests/test_std.py
|
ashwini-balnaves/python-consul
|
4ddec9b57eb5284b58967ce1a9b2422519f88cc2
|
[
"MIT"
] | 279
|
2015-01-17T04:25:04.000Z
|
2022-03-11T22:06:46.000Z
|
import base64
import operator
import struct
import time
import pytest
import six
import consul
import consul.std
Check = consul.Check
class TestHTTPClient(object):
def test_uri(self):
http = consul.std.HTTPClient()
assert http.uri('/v1/kv') == 'http://127.0.0.1:8500/v1/kv'
assert http.uri('/v1/kv', params={'index': 1}) == \
'http://127.0.0.1:8500/v1/kv?index=1'
class TestConsul(object):
def test_kv(self, consul_port):
c = consul.Consul(port=consul_port)
index, data = c.kv.get('foo')
assert data is None
assert c.kv.put('foo', 'bar') is True
index, data = c.kv.get('foo')
assert data['Value'] == six.b('bar')
def test_kv_wait(self, consul_port):
c = consul.Consul(port=consul_port)
assert c.kv.put('foo', 'bar') is True
index, data = c.kv.get('foo')
check, data = c.kv.get('foo', index=index, wait='20ms')
assert index == check
def test_kv_encoding(self, consul_port):
c = consul.Consul(port=consul_port)
# test binary
c.kv.put('foo', struct.pack('i', 1000))
index, data = c.kv.get('foo')
assert struct.unpack('i', data['Value']) == (1000,)
# test unicode
c.kv.put('foo', u'bar')
index, data = c.kv.get('foo')
assert data['Value'] == six.b('bar')
# test empty-string comes back as `None`
c.kv.put('foo', '')
index, data = c.kv.get('foo')
assert data['Value'] is None
# test None
c.kv.put('foo', None)
index, data = c.kv.get('foo')
assert data['Value'] is None
        # check that unencoded values raise an AssertionError
pytest.raises(AssertionError, c.kv.put, 'foo', {1: 2})
def test_kv_put_cas(self, consul_port):
c = consul.Consul(port=consul_port)
assert c.kv.put('foo', 'bar', cas=50) is False
assert c.kv.put('foo', 'bar', cas=0) is True
index, data = c.kv.get('foo')
assert c.kv.put('foo', 'bar2', cas=data['ModifyIndex']-1) is False
assert c.kv.put('foo', 'bar2', cas=data['ModifyIndex']) is True
index, data = c.kv.get('foo')
assert data['Value'] == six.b('bar2')
def test_kv_put_flags(self, consul_port):
c = consul.Consul(port=consul_port)
c.kv.put('foo', 'bar')
index, data = c.kv.get('foo')
assert data['Flags'] == 0
assert c.kv.put('foo', 'bar', flags=50) is True
index, data = c.kv.get('foo')
assert data['Flags'] == 50
def test_kv_recurse(self, consul_port):
c = consul.Consul(port=consul_port)
index, data = c.kv.get('foo/', recurse=True)
assert data is None
c.kv.put('foo/', None)
index, data = c.kv.get('foo/', recurse=True)
assert len(data) == 1
c.kv.put('foo/bar1', '1')
c.kv.put('foo/bar2', '2')
c.kv.put('foo/bar3', '3')
index, data = c.kv.get('foo/', recurse=True)
assert [x['Key'] for x in data] == [
'foo/', 'foo/bar1', 'foo/bar2', 'foo/bar3']
assert [x['Value'] for x in data] == [
None, six.b('1'), six.b('2'), six.b('3')]
def test_kv_delete(self, consul_port):
c = consul.Consul(port=consul_port)
c.kv.put('foo1', '1')
c.kv.put('foo2', '2')
c.kv.put('foo3', '3')
index, data = c.kv.get('foo', recurse=True)
assert [x['Key'] for x in data] == ['foo1', 'foo2', 'foo3']
assert c.kv.delete('foo2') is True
index, data = c.kv.get('foo', recurse=True)
assert [x['Key'] for x in data] == ['foo1', 'foo3']
assert c.kv.delete('foo', recurse=True) is True
index, data = c.kv.get('foo', recurse=True)
assert data is None
def test_kv_delete_cas(self, consul_port):
c = consul.Consul(port=consul_port)
c.kv.put('foo', 'bar')
index, data = c.kv.get('foo')
assert c.kv.delete('foo', cas=data['ModifyIndex']-1) is False
assert c.kv.get('foo') == (index, data)
assert c.kv.delete('foo', cas=data['ModifyIndex']) is True
index, data = c.kv.get('foo')
assert data is None
def test_kv_acquire_release(self, consul_port):
c = consul.Consul(port=consul_port)
pytest.raises(
consul.ConsulException, c.kv.put, 'foo', 'bar', acquire='foo')
s1 = c.session.create()
s2 = c.session.create()
assert c.kv.put('foo', '1', acquire=s1) is True
assert c.kv.put('foo', '2', acquire=s2) is False
assert c.kv.put('foo', '1', acquire=s1) is True
assert c.kv.put('foo', '1', release='foo') is False
assert c.kv.put('foo', '2', release=s2) is False
assert c.kv.put('foo', '2', release=s1) is True
c.session.destroy(s1)
c.session.destroy(s2)
def test_kv_keys_only(self, consul_port):
c = consul.Consul(port=consul_port)
assert c.kv.put('bar', '4') is True
assert c.kv.put('base/foo', '1') is True
assert c.kv.put('base/base/foo', '5') is True
index, data = c.kv.get('base/', keys=True, separator='/')
assert data == ['base/base/', 'base/foo']
def test_transaction(self, consul_port):
c = consul.Consul(port=consul_port)
value = base64.b64encode(b"1").decode("utf8")
d = {"KV": {"Verb": "set", "Key": "asdf", "Value": value}}
r = c.txn.put([d])
assert r["Errors"] is None
d = {"KV": {"Verb": "get", "Key": "asdf"}}
r = c.txn.put([d])
assert r["Results"][0]["KV"]["Value"] == value
def test_event(self, consul_port):
c = consul.Consul(port=consul_port)
assert c.event.fire("fooname", "foobody")
index, events = c.event.list()
assert [x['Name'] == 'fooname' for x in events]
assert [x['Payload'] == 'foobody' for x in events]
def test_event_targeted(self, consul_port):
c = consul.Consul(port=consul_port)
assert c.event.fire("fooname", "foobody")
index, events = c.event.list(name="othername")
assert events == []
index, events = c.event.list(name="fooname")
assert [x['Name'] == 'fooname' for x in events]
assert [x['Payload'] == 'foobody' for x in events]
def test_agent_checks(self, consul_port):
c = consul.Consul(port=consul_port)
def verify_and_dereg_check(check_id):
assert set(c.agent.checks().keys()) == set([check_id])
assert c.agent.check.deregister(check_id) is True
assert set(c.agent.checks().keys()) == set([])
def verify_check_status(check_id, status, notes=None):
checks = c.agent.checks()
assert checks[check_id]['Status'] == status
if notes:
assert checks[check_id]['Output'] == notes
# test setting notes on a check
c.agent.check.register('check', Check.ttl('1s'), notes='foo')
assert c.agent.checks()['check']['Notes'] == 'foo'
c.agent.check.deregister('check')
assert set(c.agent.checks().keys()) == set([])
assert c.agent.check.register(
'script_check', Check.script('/bin/true', 10)) is True
verify_and_dereg_check('script_check')
assert c.agent.check.register(
'check name',
Check.script('/bin/true', 10),
check_id='check_id') is True
verify_and_dereg_check('check_id')
http_addr = "http://127.0.0.1:{0}".format(consul_port)
assert c.agent.check.register(
'http_check', Check.http(http_addr, '10ms')) is True
time.sleep(1)
verify_check_status('http_check', 'passing')
verify_and_dereg_check('http_check')
assert c.agent.check.register(
'http_timeout_check',
Check.http(http_addr, '100ms', timeout='2s')) is True
verify_and_dereg_check('http_timeout_check')
assert c.agent.check.register('ttl_check', Check.ttl('100ms')) is True
assert c.agent.check.ttl_warn('ttl_check') is True
verify_check_status('ttl_check', 'warning')
assert c.agent.check.ttl_warn(
'ttl_check', notes='its not quite right') is True
verify_check_status('ttl_check', 'warning', 'its not quite right')
assert c.agent.check.ttl_fail('ttl_check') is True
verify_check_status('ttl_check', 'critical')
assert c.agent.check.ttl_fail(
'ttl_check', notes='something went boink!') is True
verify_check_status(
'ttl_check', 'critical', notes='something went boink!')
assert c.agent.check.ttl_pass('ttl_check') is True
verify_check_status('ttl_check', 'passing')
assert c.agent.check.ttl_pass(
'ttl_check', notes='all hunky dory!') is True
verify_check_status('ttl_check', 'passing', notes='all hunky dory!')
# wait for ttl to expire
time.sleep(120/1000.0)
verify_check_status('ttl_check', 'critical')
verify_and_dereg_check('ttl_check')
def test_service_dereg_issue_156(self, consul_port):
# https://github.com/cablehead/python-consul/issues/156
service_name = 'app#127.0.0.1#3000'
c = consul.Consul(port=consul_port)
c.agent.service.register(service_name)
time.sleep(80/1000.0)
index, nodes = c.health.service(service_name)
assert [node['Service']['ID'] for node in nodes] == [service_name]
# Clean up tasks
assert c.agent.service.deregister(service_name) is True
time.sleep(40/1000.0)
index, nodes = c.health.service(service_name)
assert [node['Service']['ID'] for node in nodes] == []
def test_agent_checks_service_id(self, consul_port):
c = consul.Consul(port=consul_port)
c.agent.service.register('foo1')
time.sleep(40/1000.0)
index, nodes = c.health.service('foo1')
assert [node['Service']['ID'] for node in nodes] == ['foo1']
c.agent.check.register('foo', Check.ttl('100ms'), service_id='foo1')
time.sleep(40/1000.0)
index, nodes = c.health.service('foo1')
assert set([
check['ServiceID'] for node in nodes
for check in node['Checks']]) == set(['foo1', ''])
assert set([
check['CheckID'] for node in nodes
for check in node['Checks']]) == set(['foo', 'serfHealth'])
# Clean up tasks
assert c.agent.check.deregister('foo') is True
time.sleep(40/1000.0)
assert c.agent.service.deregister('foo1') is True
time.sleep(40/1000.0)
def test_agent_register_check_no_service_id(self, consul_port):
c = consul.Consul(port=consul_port)
index, nodes = c.health.service("foo1")
assert nodes == []
pytest.raises(consul.std.base.ConsulException,
c.agent.check.register,
'foo', Check.ttl('100ms'),
service_id='foo1')
time.sleep(40/1000.0)
assert c.agent.checks() == {}
# Cleanup tasks
c.agent.check.deregister('foo')
time.sleep(40/1000.0)
def test_agent_register_enable_tag_override(self, consul_port):
c = consul.Consul(port=consul_port)
index, nodes = c.health.service("foo1")
assert nodes == []
c.agent.service.register('foo', enable_tag_override=True)
assert c.agent.services()['foo']['EnableTagOverride']
# Cleanup tasks
c.agent.check.deregister('foo')
def test_agent_service_maintenance(self, consul_port):
c = consul.Consul(port=consul_port)
c.agent.service.register('foo', check=Check.ttl('100ms'))
time.sleep(40/1000.0)
c.agent.service.maintenance('foo', 'true', "test")
time.sleep(40/1000.0)
checks_pre = c.agent.checks()
assert '_service_maintenance:foo' in checks_pre.keys()
assert 'test' == checks_pre['_service_maintenance:foo']['Notes']
c.agent.service.maintenance('foo', 'false')
time.sleep(40/1000.0)
checks_post = c.agent.checks()
assert '_service_maintenance:foo' not in checks_post.keys()
# Cleanup
c.agent.service.deregister('foo')
time.sleep(40/1000.0)
def test_agent_node_maintenance(self, consul_port):
c = consul.Consul(port=consul_port)
c.agent.maintenance('true', "test")
time.sleep(40/1000.0)
checks_pre = c.agent.checks()
assert '_node_maintenance' in checks_pre.keys()
assert 'test' == checks_pre['_node_maintenance']['Notes']
c.agent.maintenance('false')
time.sleep(40/1000.0)
checks_post = c.agent.checks()
assert '_node_maintenance' not in checks_post.keys()
def test_agent_members(self, consul_port):
c = consul.Consul(port=consul_port)
members = c.agent.members()
for x in members:
assert x['Status'] == 1
assert not x['Name'] is None
assert not x['Tags'] is None
assert c.agent.self()['Member'] in members
wan_members = c.agent.members(wan=True)
for x in wan_members:
assert 'dc1' in x['Name']
def test_agent_self(self, consul_port):
c = consul.Consul(port=consul_port)
assert set(c.agent.self().keys()) == set(['Member', 'Stats', 'Config',
'Coord', 'DebugConfig',
'Meta'])
def test_agent_services(self, consul_port):
c = consul.Consul(port=consul_port)
assert c.agent.service.register('foo') is True
assert set(c.agent.services().keys()) == set(['foo'])
assert c.agent.service.deregister('foo') is True
assert set(c.agent.services().keys()) == set()
# test address param
assert c.agent.service.register('foo', address='10.10.10.1') is True
assert [
v['Address'] for k, v in c.agent.services().items()
if k == 'foo'][0] == '10.10.10.1'
assert c.agent.service.deregister('foo') is True
def test_catalog(self, consul_port):
c = consul.Consul(port=consul_port)
# grab the node our server created, so we can ignore it
_, nodes = c.catalog.nodes()
assert len(nodes) == 1
current = nodes[0]
# test catalog.datacenters
assert c.catalog.datacenters() == ['dc1']
# test catalog.register
pytest.raises(
consul.ConsulException,
c.catalog.register, 'foo', '10.1.10.11', dc='dc2')
assert c.catalog.register(
'n1',
'10.1.10.11',
service={'service': 's1'},
check={'name': 'c1'}) is True
assert c.catalog.register(
'n1', '10.1.10.11', service={'service': 's2'}) is True
assert c.catalog.register(
'n2', '10.1.10.12',
service={'service': 's1', 'tags': ['master']}) is True
# test catalog.nodes
pytest.raises(consul.ConsulException, c.catalog.nodes, dc='dc2')
_, nodes = c.catalog.nodes()
nodes.remove(current)
assert [x['Node'] for x in nodes] == ['n1', 'n2']
# test catalog.services
pytest.raises(consul.ConsulException, c.catalog.services, dc='dc2')
_, services = c.catalog.services()
assert services == {'s1': [u'master'], 's2': [], 'consul': []}
# test catalog.node
pytest.raises(consul.ConsulException, c.catalog.node, 'n1', dc='dc2')
_, node = c.catalog.node('n1')
assert set(node['Services'].keys()) == set(['s1', 's2'])
_, node = c.catalog.node('n3')
assert node is None
# test catalog.service
pytest.raises(
consul.ConsulException, c.catalog.service, 's1', dc='dc2')
_, nodes = c.catalog.service('s1')
assert set([x['Node'] for x in nodes]) == set(['n1', 'n2'])
_, nodes = c.catalog.service('s1', tag='master')
assert set([x['Node'] for x in nodes]) == set(['n2'])
# test catalog.deregister
pytest.raises(
consul.ConsulException, c.catalog.deregister, 'n2', dc='dc2')
assert c.catalog.deregister('n1', check_id='c1') is True
assert c.catalog.deregister('n2', service_id='s1') is True
# check the nodes weren't removed
_, nodes = c.catalog.nodes()
nodes.remove(current)
assert [x['Node'] for x in nodes] == ['n1', 'n2']
# check n2's s1 service was removed though
_, nodes = c.catalog.service('s1')
assert set([x['Node'] for x in nodes]) == set(['n1'])
# cleanup
assert c.catalog.deregister('n1') is True
assert c.catalog.deregister('n2') is True
_, nodes = c.catalog.nodes()
nodes.remove(current)
assert [x['Node'] for x in nodes] == []
def test_health_service(self, consul_port):
c = consul.Consul(port=consul_port)
# check there are no nodes for the service 'foo'
index, nodes = c.health.service('foo')
assert nodes == []
# register two nodes, one with a long ttl, the other shorter
c.agent.service.register(
'foo',
service_id='foo:1',
check=Check.ttl('10s'),
tags=['tag:foo:1'])
c.agent.service.register(
'foo', service_id='foo:2', check=Check.ttl('100ms'))
time.sleep(40/1000.0)
# check the nodes show for the /health/service endpoint
index, nodes = c.health.service('foo')
assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2']
# but that they aren't passing their health check
index, nodes = c.health.service('foo', passing=True)
assert nodes == []
# ping the two node's health check
c.agent.check.ttl_pass('service:foo:1')
c.agent.check.ttl_pass('service:foo:2')
time.sleep(40/1000.0)
# both nodes are now available
index, nodes = c.health.service('foo', passing=True)
assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2']
# wait until the short ttl node fails
time.sleep(120/1000.0)
# only one node available
index, nodes = c.health.service('foo', passing=True)
assert [node['Service']['ID'] for node in nodes] == ['foo:1']
# ping the failed node's health check
c.agent.check.ttl_pass('service:foo:2')
time.sleep(40/1000.0)
# check both nodes are available
index, nodes = c.health.service('foo', passing=True)
assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2']
# check that tag works
index, nodes = c.health.service('foo', tag='tag:foo:1')
assert [node['Service']['ID'] for node in nodes] == ['foo:1']
# deregister the nodes
c.agent.service.deregister('foo:1')
c.agent.service.deregister('foo:2')
time.sleep(40/1000.0)
index, nodes = c.health.service('foo')
assert nodes == []
def test_health_state(self, consul_port):
c = consul.Consul(port=consul_port)
# The empty string is for the Serf Health Status check, which has an
# empty ServiceID
index, nodes = c.health.state('any')
assert [node['ServiceID'] for node in nodes] == ['']
# register two nodes, one with a long ttl, the other shorter
c.agent.service.register(
'foo', service_id='foo:1', check=Check.ttl('10s'))
c.agent.service.register(
'foo', service_id='foo:2', check=Check.ttl('100ms'))
time.sleep(40/1000.0)
# check the nodes show for the /health/state/any endpoint
index, nodes = c.health.state('any')
assert set([node['ServiceID'] for node in nodes]) == set(
['', 'foo:1', 'foo:2'])
# but that they aren't passing their health check
index, nodes = c.health.state('passing')
        assert not any(
            node['ServiceID'] in ('foo:1', 'foo:2') for node in nodes)
# ping the two node's health check
c.agent.check.ttl_pass('service:foo:1')
c.agent.check.ttl_pass('service:foo:2')
time.sleep(40/1000.0)
# both nodes are now available
index, nodes = c.health.state('passing')
assert set([node['ServiceID'] for node in nodes]) == set(
['', 'foo:1', 'foo:2'])
# wait until the short ttl node fails
time.sleep(2200/1000.0)
# only one node available
index, nodes = c.health.state('passing')
assert set([node['ServiceID'] for node in nodes]) == set(
['', 'foo:1'])
# ping the failed node's health check
c.agent.check.ttl_pass('service:foo:2')
time.sleep(40/1000.0)
# check both nodes are available
index, nodes = c.health.state('passing')
assert set([node['ServiceID'] for node in nodes]) == set(
['', 'foo:1', 'foo:2'])
# deregister the nodes
c.agent.service.deregister('foo:1')
c.agent.service.deregister('foo:2')
time.sleep(40/1000.0)
index, nodes = c.health.state('any')
assert [node['ServiceID'] for node in nodes] == ['']
def test_health_node(self, consul_port):
c = consul.Consul(port=consul_port)
# grab local node name
node = c.agent.self()['Config']['NodeName']
index, checks = c.health.node(node)
assert node in [check["Node"] for check in checks]
def test_health_checks(self, consul_port):
c = consul.Consul(port=consul_port)
c.agent.service.register(
'foobar', service_id='foobar', check=Check.ttl('10s'))
time.sleep(40/1000.00)
index, checks = c.health.checks('foobar')
assert [check['ServiceID'] for check in checks] == ['foobar']
assert [check['CheckID'] for check in checks] == ['service:foobar']
c.agent.service.deregister('foobar')
time.sleep(40/1000.0)
index, checks = c.health.checks('foobar')
assert len(checks) == 0
def test_session(self, consul_port):
c = consul.Consul(port=consul_port)
# session.create
pytest.raises(consul.ConsulException, c.session.create, node='n2')
pytest.raises(consul.ConsulException, c.session.create, dc='dc2')
session_id = c.session.create('my-session')
# session.list
pytest.raises(consul.ConsulException, c.session.list, dc='dc2')
_, sessions = c.session.list()
assert [x['Name'] for x in sessions] == ['my-session']
# session.info
pytest.raises(
consul.ConsulException, c.session.info, session_id, dc='dc2')
index, session = c.session.info('1'*36)
assert session is None
index, session = c.session.info(session_id)
assert session['Name'] == 'my-session'
# session.node
node = session['Node']
pytest.raises(
consul.ConsulException, c.session.node, node, dc='dc2')
_, sessions = c.session.node(node)
assert [x['Name'] for x in sessions] == ['my-session']
# session.destroy
pytest.raises(
consul.ConsulException, c.session.destroy, session_id, dc='dc2')
assert c.session.destroy(session_id) is True
_, sessions = c.session.list()
assert sessions == []
def test_session_delete_ttl_renew(self, consul_port):
c = consul.Consul(port=consul_port)
s = c.session.create(behavior='delete', ttl=20)
# attempt to renew an unknown session
pytest.raises(consul.NotFound, c.session.renew, '1'*36)
session = c.session.renew(s)
assert session['Behavior'] == 'delete'
assert session['TTL'] == '20s'
# trying out the behavior
assert c.kv.put('foo', '1', acquire=s) is True
index, data = c.kv.get('foo')
assert data['Value'] == six.b('1')
c.session.destroy(s)
index, data = c.kv.get('foo')
assert data is None
def test_acl_disabled(self, consul_port):
c = consul.Consul(port=consul_port)
pytest.raises(consul.ACLDisabled, c.acl.list)
pytest.raises(consul.ACLDisabled, c.acl.info, '1'*36)
pytest.raises(consul.ACLDisabled, c.acl.create)
pytest.raises(consul.ACLDisabled, c.acl.update, 'foo')
pytest.raises(consul.ACLDisabled, c.acl.clone, 'foo')
pytest.raises(consul.ACLDisabled, c.acl.destroy, 'foo')
def test_acl_permission_denied(self, acl_consul):
c = consul.Consul(port=acl_consul.port)
pytest.raises(consul.ACLPermissionDenied, c.acl.list)
pytest.raises(consul.ACLPermissionDenied, c.acl.create)
pytest.raises(consul.ACLPermissionDenied, c.acl.update, 'anonymous')
pytest.raises(consul.ACLPermissionDenied, c.acl.clone, 'anonymous')
pytest.raises(consul.ACLPermissionDenied, c.acl.destroy, 'anonymous')
    def test_acl_explicit_token_use(self, acl_consul):
c = consul.Consul(port=acl_consul.port)
master_token = acl_consul.token
acls = c.acl.list(token=master_token)
assert set([x['ID'] for x in acls]) == \
set(['anonymous', master_token])
assert c.acl.info('1'*36) is None
compare = [c.acl.info(master_token), c.acl.info('anonymous')]
compare.sort(key=operator.itemgetter('ID'))
assert acls == compare
rules = """
key "" {
policy = "read"
}
key "private/" {
policy = "deny"
}
service "foo-" {
policy = "write"
}
service "bar-" {
policy = "read"
}
"""
token = c.acl.create(rules=rules, token=master_token)
assert c.acl.info(token)['Rules'] == rules
token2 = c.acl.clone(token, token=master_token)
assert c.acl.info(token2)['Rules'] == rules
assert c.acl.update(token2, name='Foo', token=master_token) == token2
assert c.acl.info(token2)['Name'] == 'Foo'
assert c.acl.destroy(token2, token=master_token) is True
assert c.acl.info(token2) is None
c.kv.put('foo', 'bar')
c.kv.put('private/foo', 'bar')
assert c.kv.get('foo', token=token)[1]['Value'] == six.b('bar')
pytest.raises(
consul.ACLPermissionDenied, c.kv.put, 'foo', 'bar2', token=token)
pytest.raises(
consul.ACLPermissionDenied, c.kv.delete, 'foo', token=token)
assert c.kv.get('private/foo')[1]['Value'] == six.b('bar')
pytest.raises(
consul.ACLPermissionDenied,
c.kv.get, 'private/foo', token=token)
pytest.raises(
consul.ACLPermissionDenied,
c.kv.put, 'private/foo', 'bar2', token=token)
pytest.raises(
consul.ACLPermissionDenied,
c.kv.delete, 'private/foo', token=token)
# test token pass through for service registration
pytest.raises(
consul.ACLPermissionDenied,
c.agent.service.register, "bar-1", token=token)
c.agent.service.register("foo-1", token=token)
index, data = c.health.service('foo-1', token=token)
assert data[0]['Service']['ID'] == "foo-1"
index, data = c.health.checks('foo-1', token=token)
assert data == []
index, data = c.health.service('bar-1', token=token)
assert not data
# clean up
assert c.agent.service.deregister('foo-1') is True
c.acl.destroy(token, token=master_token)
acls = c.acl.list(token=master_token)
assert set([x['ID'] for x in acls]) == \
set(['anonymous', master_token])
def test_acl_implicit_token_use(self, acl_consul):
# configure client to use the master token by default
c = consul.Consul(port=acl_consul.port, token=acl_consul.token)
master_token = acl_consul.token
acls = c.acl.list()
assert set([x['ID'] for x in acls]) == \
set(['anonymous', master_token])
assert c.acl.info('foo') is None
compare = [c.acl.info(master_token), c.acl.info('anonymous')]
compare.sort(key=operator.itemgetter('ID'))
assert acls == compare
rules = """
key "" {
policy = "read"
}
key "private/" {
policy = "deny"
}
"""
token = c.acl.create(rules=rules)
assert c.acl.info(token)['Rules'] == rules
token2 = c.acl.clone(token)
assert c.acl.info(token2)['Rules'] == rules
assert c.acl.update(token2, name='Foo') == token2
assert c.acl.info(token2)['Name'] == 'Foo'
assert c.acl.destroy(token2) is True
assert c.acl.info(token2) is None
c.kv.put('foo', 'bar')
c.kv.put('private/foo', 'bar')
c_limited = consul.Consul(port=acl_consul.port, token=token)
assert c_limited.kv.get('foo')[1]['Value'] == six.b('bar')
pytest.raises(
consul.ACLPermissionDenied, c_limited.kv.put, 'foo', 'bar2')
pytest.raises(
consul.ACLPermissionDenied, c_limited.kv.delete, 'foo')
assert c.kv.get('private/foo')[1]['Value'] == six.b('bar')
pytest.raises(
consul.ACLPermissionDenied,
c_limited.kv.get, 'private/foo')
pytest.raises(
consul.ACLPermissionDenied,
c_limited.kv.put, 'private/foo', 'bar2')
pytest.raises(
consul.ACLPermissionDenied,
c_limited.kv.delete, 'private/foo')
# check we can override the client's default token
pytest.raises(
consul.ACLPermissionDenied,
c.kv.get, 'private/foo', token=token
)
pytest.raises(
consul.ACLPermissionDenied,
c.kv.put, 'private/foo', 'bar2', token=token)
pytest.raises(
consul.ACLPermissionDenied,
c.kv.delete, 'private/foo', token=token)
# clean up
c.acl.destroy(token)
acls = c.acl.list()
assert set([x['ID'] for x in acls]) == \
set(['anonymous', master_token])
def test_status_leader(self, consul_port):
c = consul.Consul(port=consul_port)
agent_self = c.agent.self()
leader = c.status.leader()
addr_port = agent_self['Stats']['consul']['leader_addr']
assert leader == addr_port, \
"Leader value was {0}, expected value " \
"was {1}".format(leader, addr_port)
def test_status_peers(self, consul_port):
c = consul.Consul(port=consul_port)
agent_self = c.agent.self()
addr_port = agent_self['Stats']['consul']['leader_addr']
peers = c.status.peers()
assert addr_port in peers, \
"Expected value '{0}' " \
"in peer list but it was not present".format(addr_port)
def test_query(self, consul_port):
c = consul.Consul(port=consul_port)
# check that query list is empty
queries = c.query.list()
assert queries == []
# create a new named query
query_service = 'foo'
query_name = 'fooquery'
query = c.query.create(query_service, query_name)
# assert response contains query ID
assert 'ID' in query \
and query['ID'] is not None \
and str(query['ID']) != ''
# retrieve query using id and name
queries = c.query.get(query['ID'])
assert queries != [] \
and len(queries) == 1
assert queries[0]['Name'] == query_name \
and queries[0]['ID'] == query['ID']
# explain query
assert c.query.explain(query_name)['Query']
# delete query
assert c.query.delete(query['ID'])
def test_coordinate(self, consul_port):
c = consul.Consul(port=consul_port)
c.coordinate.nodes()
c.coordinate.datacenters()
assert set(c.coordinate.datacenters()[0].keys()) == \
set(['Datacenter', 'Coordinates', 'AreaID'])
def test_operator(self, consul_port):
c = consul.Consul(port=consul_port)
config = c.operator.raft_config()
assert config["Index"] == 1
leader = False
voter = False
for server in config["Servers"]:
if server["Leader"]:
leader = True
if server["Voter"]:
voter = True
assert leader
assert voter
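
The suite above exercises the python-consul client against a live agent. As a minimal standalone sketch of the same agent/health calls, assuming a local Consul agent listening on 127.0.0.1:8500 (hypothetical host/port, not part of the tests) and the python-consul package:

# A minimal sketch, not part of the suite above: register a service with a
# TTL check, mark the check as passing, and read it back via the health API.
# Assumes a Consul agent on 127.0.0.1:8500 and the python-consul package.
import time

import consul
from consul import Check

c = consul.Consul(host='127.0.0.1', port=8500)  # hypothetical local agent
c.agent.service.register('demo', service_id='demo:1', check=Check.ttl('10s'))
c.agent.check.ttl_pass('service:demo:1')  # TTL checks start critical until passed
time.sleep(40 / 1000.0)  # same settle pattern the tests above use

index, nodes = c.health.service('demo', passing=True)
print([node['Service']['ID'] for node in nodes])  # expected: ['demo:1']

c.agent.service.deregister('demo:1')
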
| 34.766702
| 78
| 0.574257
| 4,296
| 32,785
| 4.297952
| 0.079376
| 0.063367
| 0.026213
| 0.035908
| 0.702773
| 0.64206
| 0.569053
| 0.515327
| 0.474328
| 0.431163
| 0
| 0.023165
| 0.275797
| 32,785
| 942
| 79
| 34.803609
| 0.754496
| 0.059539
| 0
| 0.433333
| 0
| 0
| 0.11702
| 0.00234
| 0
| 0
| 0
| 0
| 0.286364
| 1
| 0.063636
| false
| 0.028788
| 0.012121
| 0
| 0.078788
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6af148a08a578a5383b105b30ec3598b62b9c1f1
| 385
|
py
|
Python
|
Q58/sol.py
|
shivamT95/projecteuler
|
3e87b64235edd8444bc27198717a38e0ae0e0c0b
|
[
"MIT"
] | null | null | null |
Q58/sol.py
|
shivamT95/projecteuler
|
3e87b64235edd8444bc27198717a38e0ae0e0c0b
|
[
"MIT"
] | null | null | null |
Q58/sol.py
|
shivamT95/projecteuler
|
3e87b64235edd8444bc27198717a38e0ae0e0c0b
|
[
"MIT"
] | null | null | null |
import math


def is_prime(n):
    if n == 1:
        return False
    if n % 2 == 0 and n > 2:
        return False
    return all(n % i for i in range(3, int(math.sqrt(n)) + 1, 2))


# Project Euler 58: find the side length of the number spiral at which the
# ratio of primes along both diagonals first drops below 10%.
tot = 1  # diagonal values counted so far (the centre 1 counts as one)
dia = 0  # how many of those values are prime
for side_length in range(3, 100001, 2):
    hi = side_length**2  # bottom-right corner of this ring
    # the four corners of a ring with odd side s are s**2 - i*(s - 1), i = 0..3,
    # which is the same as hi - i*side_length + i
    for i in range(4):
        if is_prime(hi - i*side_length + i):
            dia = dia + 1
    tot = tot + 4
    if dia/tot < 0.1:
        print(side_length)
        break
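
The solution relies on the fact that the four diagonal values of the ring with odd side length s are s^2 - i*(s - 1) for i = 0..3, which is exactly what hi - i*side_length + i computes. A small sanity-check sketch (not part of the original solution):

# Sanity check of the corner formula used above: the 5x5 ring of the spiral
# has diagonal values 13, 17, 21 and 25.
s = 5
corners = [s**2 - i * (s - 1) for i in range(4)]
print(corners)  # [25, 21, 17, 13]
assert corners == [s**2 - i * s + i for i in range(4)]
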
| 18.333333
| 60
| 0.592208
| 79
| 385
| 2.810127
| 0.379747
| 0.18018
| 0.054054
| 0.099099
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081851
| 0.27013
| 385
| 20
| 61
| 19.25
| 0.708185
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.277778
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6af16d0caa3aded0dbc0cbf9957a4f9e9107ae10
| 1,530
|
py
|
Python
|
lesson_07/02.py
|
alexartwww/geekbrains
|
f58720dc1d29bc94201b8b9c9239813c0d14ed64
|
[
"MIT"
] | null | null | null |
lesson_07/02.py
|
alexartwww/geekbrains
|
f58720dc1d29bc94201b8b9c9239813c0d14ed64
|
[
"MIT"
] | null | null | null |
lesson_07/02.py
|
alexartwww/geekbrains
|
f58720dc1d29bc94201b8b9c9239813c0d14ed64
|
[
"MIT"
] | null | null | null |
task = '''
Implement a project that calculates the total fabric consumption for producing
clothing. The core entity (class) of this project is a piece of clothing, which
may have a name. The clothing types in this project are a coat and a costume.
These types have parameters: size (for the coat) and height (for the costume);
they can be plain numbers, V and H respectively.
To determine the fabric consumption for each clothing type use the formulas:
for the coat (V/6.5 + 0.5), for the costume (2 * H + 0.3). Verify that these
methods work on real data.
Implement an overall fabric-consumption total. Put the material from this
lesson into practice: implement abstract classes for the project's main
classes and try out the @property decorator.
'''


class Clothes:
    @property
    def need_material(self):
        raise NotImplementedError("This method must be overridden in a subclass")


class Coat(Clothes):
    def __init__(self, v):
        self.v = v

    @property
    def need_material(self):
        # coat: fabric consumption is V / 6.5 + 0.5, per the task description
        return self.v / 6.5 + 0.5


class Costume(Clothes):
    def __init__(self, h):
        self.h = h

    @property
    def need_material(self):
        # costume: fabric consumption is 2 * H + 0.3, per the task description
        return 2 * self.h + 0.3
if __name__ == '__main__':
print(task)
objects = [
231,
22,
Coat(32),
'test',
True,
Costume(87),
Coat(32)
]
need_material = 0
for obj in objects:
if isinstance(obj, Clothes):
need_material += obj.need_material
print(need_material)
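
The task calls for abstract base classes; the code above signals abstractness by raising NotImplementedError in the parent property. A hedged alternative sketch using the abc module (the AbstractClothes/AbstractCoat names are illustrative, not part of the original):

# Alternative sketch using abc (illustrative names, not part of the original):
# the same contract, but instantiating the base class or a subclass that
# forgets to override need_material fails immediately instead of at call time.
from abc import ABC, abstractmethod


class AbstractClothes(ABC):
    @property
    @abstractmethod
    def need_material(self):
        """Fabric required for one item."""


class AbstractCoat(AbstractClothes):
    def __init__(self, v):
        self.v = v

    @property
    def need_material(self):
        return self.v / 6.5 + 0.5


print(AbstractCoat(32).need_material)  # ~5.42
# AbstractClothes() would raise TypeError: can't instantiate abstract class
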
| 25.081967
| 83
| 0.67451
| 200
| 1,530
| 5.05
| 0.53
| 0.083168
| 0.044554
| 0.068317
| 0.10198
| 0.065347
| 0
| 0
| 0
| 0
| 0
| 0.022453
| 0.243137
| 1,530
| 60
| 84
| 25.5
| 0.848877
| 0
| 0
| 0.133333
| 0
| 0.022222
| 0.519608
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0.044444
| 0.222222
| 0.044444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a7357e7ea86e6139bf2479d07ddffa8fab66e70
| 1,170
|
py
|
Python
|
core/fanarttvapi.py
|
SchadLucas/pyscrape
|
814a5e767ed899b5929533729c15262f1ad6a52b
|
[
"MIT"
] | null | null | null |
core/fanarttvapi.py
|
SchadLucas/pyscrape
|
814a5e767ed899b5929533729c15262f1ad6a52b
|
[
"MIT"
] | 1
|
2015-05-07T11:38:32.000Z
|
2015-05-07T11:38:32.000Z
|
core/fanarttvapi.py
|
SchadLucas/pyscrape
|
814a5e767ed899b5929533729c15262f1ad6a52b
|
[
"MIT"
] | null | null | null |
import urllib2
import json
import time
from core.helpers.decorator import Cached
from core.helpers.config import config
from core.helpers.logger import log, LogLevel
@Cached
def __request(request):
log('Send Fanart Request: ' + request.replace(config.fanart.api_key, 'XXX'), 'DEBUG')
headers = {'Accept': 'application/json'}
_request = urllib2.Request(request, headers=headers)
response_body = urllib2.urlopen(_request).read()
result = json.loads(response_body)
return result
def _get(video_type, movie_id, output_format='JSON'):
req = '{0}{1}/{2}/{3}/{4}'.format(config.fanart.url_base, video_type,
config.fanart.api_key, movie_id, output_format)
try_again = True
n = 0
while try_again and n < 10:
try:
return __request(req)
except urllib2.HTTPError:
n += 1
try_again = True
log('Ooops.. FanartTV Error - Try again', LogLevel.Warning)
time.sleep(2)
def get_movie(tmdb_id):
return _get(video_type='movie', movie_id=tmdb_id)
def get_show(tvdb_id):
return _get(video_type='series', movie_id=tvdb_id)
| 27.857143
| 89
| 0.655556
| 156
| 1,170
| 4.705128
| 0.416667
| 0.049046
| 0.061308
| 0.049046
| 0.054496
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015556
| 0.230769
| 1,170
| 42
| 90
| 27.857143
| 0.8
| 0
| 0
| 0.064516
| 0
| 0
| 0.100769
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.193548
| 0.064516
| 0.451613
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a73919f13735ea63c30a1b71cb346f2f001cba6
| 2,096
|
py
|
Python
|
metrics.py
|
AndreasLH/Image-Colourization
|
b41182354446feeb80000a84e5db9100b30e9d81
|
[
"MIT"
] | 1
|
2021-11-01T09:53:34.000Z
|
2021-11-01T09:53:34.000Z
|
metrics.py
|
AndreasLH/Image-Colourization
|
b41182354446feeb80000a84e5db9100b30e9d81
|
[
"MIT"
] | null | null | null |
metrics.py
|
AndreasLH/Image-Colourization
|
b41182354446feeb80000a84e5db9100b30e9d81
|
[
"MIT"
] | null | null | null |
from math import log10, sqrt
import cv2
import numpy as np
def PSNR(original, compressed):
'''
    Calculate the peak signal-to-noise ratio (PSNR) between a ground-truth
    image and a predicted image.
    See https://www.geeksforgeeks.org/python-peak-signal-to-noise-ratio-psnr/
    for reference.
Parameters
----------
true image (cv2 image)
predicted image (cv2 image)
Returns
-------
PSNR score
'''
    # cast to float first: cv2 images are uint8, so the raw subtraction would
    # wrap around instead of going negative
    mse = np.mean((original.astype(np.float64) - compressed.astype(np.float64)) ** 2)
    if mse == 0:  # an MSE of zero means the images are identical (no noise),
                  # so PSNR is unbounded; return a large sentinel value instead
        return 100
max_pixel = 255.0
psnr = 20 * log10(max_pixel / sqrt(mse))
return psnr
def colourfulnessMetric(img):
"""
Created on Mon Nov 15 10:55:16 2021
@author: Yucheng
Parameters
----------
img : cv2 RGB image
Returns
-------
    M : colourfulness metric
-----------------------------
|not colourful | 0 |
|slightly colorful | 15 |
|moderately colourful | 33 |
|averagely colourful | 45 |
|quite colourful | 59 |
|highly colourful | 82 |
|extremely colourful | 109 |
-----------------------------
"""
# Get RGB components
R,G,B = cv2.split(img.astype("float"))
# colourfulness metric from Hasler et al., section 7
rg = R - G
yb = (1/2) * (R+G) - B
sigma_rgyb = np.sqrt(np.var(rg) + np.var(yb))
mu_rgyb = np.sqrt(np.mean(rg)**2 + np.mean(yb)**2)
M = sigma_rgyb + 0.3 * mu_rgyb
return M
def main():
import matplotlib.pyplot as plt
original = cv2.imread("test_imgs/original_image.png")
compressed = cv2.imread("test_imgs/compressed_image1.png", 1)
value = PSNR(original, compressed)
print(f"PSNR value is {value} dB")
img2 = cv2.imread("rainbow.jpg") # opens as BGR
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
plt.imshow(img2[:,:,:])
plt.show()
M = colourfulnessMetric(img2)
print(M)
if __name__ == "__main__":
main()
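
As a quick numeric check of the PSNR formula above (a sketch, not part of the original module): two constant images that differ by 16 grey levels have MSE = 256, so PSNR = 20*log10(255/16), roughly 24.0 dB.

# Numeric check of the PSNR formula above.
import numpy as np
from math import log10, sqrt

a = np.full((8, 8), 100.0)
b = np.full((8, 8), 116.0)
mse = np.mean((a - b) ** 2)  # 256.0
print(20 * log10(255.0 / sqrt(mse)))  # ~24.05
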
| 24.372093
| 95
| 0.564885
| 263
| 2,096
| 4.429658
| 0.51711
| 0.046352
| 0.037768
| 0.029185
| 0.037768
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044177
| 0.287214
| 2,096
| 85
| 96
| 24.658824
| 0.735609
| 0.419847
| 0
| 0
| 0
| 0
| 0.100187
| 0.055243
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.125
| 0
| 0.3125
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a7533a2a833e21052e44904ba80f9df53fd03e4
| 4,560
|
py
|
Python
|
scripts/list-all-test-suites-for-ci.py
|
uc-cdis/gen3-qa
|
6634678b17cb5dd86533667c22037b1e2ddeb0b8
|
[
"Apache-2.0"
] | 4
|
2019-08-30T22:25:24.000Z
|
2021-09-15T19:19:44.000Z
|
scripts/list-all-test-suites-for-ci.py
|
uc-cdis/gen3-qa
|
6634678b17cb5dd86533667c22037b1e2ddeb0b8
|
[
"Apache-2.0"
] | 148
|
2018-04-16T17:26:54.000Z
|
2022-03-04T16:16:02.000Z
|
scripts/list-all-test-suites-for-ci.py
|
uc-cdis/gen3-qa
|
6634678b17cb5dd86533667c22037b1e2ddeb0b8
|
[
"Apache-2.0"
] | 3
|
2019-08-01T03:15:38.000Z
|
2022-03-07T01:23:12.000Z
|
import os
import subprocess
test_suites_that_cant_run_in_parallel = [
"test-apis-dbgapTest", # not thread-safe
"test-google-googleDataAccessTest", # not thread-safe
"test-google-googleServiceAccountRemovalTest", # not thread-safe
"test-guppy-guppyTest", # not thread-safe
"test-smokeTests-brainTests", # manual (executable test)
"test-batch-GoogleBucketManifestGenerationTest", # @donot
"test-batch-S3BucketManifestGenerationTest", # @donot
"test-portal-dataguidOrgTest", # @donot
"test-mariner-marinerIntegrationTest", # @donot
"test-suites-fail", # special suite to force failures for invalid test labels
"test-portal-roleBasedUITest", # manual (executable test)
"test-portal-limitedFilePFBExportTestPlan", # manual (executable test)
"test-access-accessGUITest", # manual (executable test)
"test-portal-tieredAccessTest", # manual (executable test)
"test-portal-discoveryPageTestPlan", # manual (executable test)
"test-portal-dashboardReportsTest", # manual (executable test)
"test-guppy-nestedAggTest", # manual (executable test)
"test-portal-404pageTest", # manual (executable test)
"test-apis-dcfDataReplicationTest", # manual (executable test)
"test-portal-exportPfbToWorkspaceTest", # manual (executable test)
"test-portal-homepageChartNodesExecutableTestPlan",# manual (executable test)
"test-portal-profilePageTest", # manual (executable test)
"test-portal-terraExportWarningTestPlan", # manual (executable test)
"test-pelican-exportPfbTest", # not ready
"test-regressions-exportPerformanceTest", # legacy (disabled test)
"test-regressions-generateTestData", # legacy (disabled test)
"test-regressions-queryPerformanceTest", # legacy (disabled test)
"test-regressions-submissionPerformanceTest", # legacy (disabled test)
"test-dream-challenge-DCgen3clientTest", # legacy (disabled test)
"test-dream-challenge-synapaseLoginTest", # legacy (disabled test)
"test-prod-checkAllProjectsBucketAccessTest", # prod test
"test-portal-pfbExportTest", # nightly build test
"test-apis-etlTest", # long-running test
"test-apis-centralizedAuth", # long-running test
"test-google-googleServiceAccountTest", # long-running test
"test-google-googleServiceAccountKeyTest", # long-running test
"test-portal-dataUploadTest", # SUPER long-running test
"test-portal-indexingPageTest", # long-running test
"test-apis-metadataIngestionTest", # long-running test
"test-apis-auditServiceTest" # long-running test
]
def collect_test_suites_from_codeceptjs_dryrun():
my_env = os.environ.copy()
bashCommand = "npx codeceptjs dry-run"
process = subprocess.Popen(
bashCommand.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env
)
output, error = process.communicate()
test_suites = []
for line in output.splitlines():
line = line.decode("utf-8")
# print(f'### line: {line}')
# ignore pre-release test suites
if "pre-release" in line:
continue
elif ".js" in line:
full_path_to_test_js = line.split("/")
suite_folder = full_path_to_test_js[-2]
# print(f'## suite_folder: {suite_folder}')
test_script = full_path_to_test_js[-1]
# print(f'## test_script: {test_script}')
test_script_without_extension = test_script[0 : test_script.index(".")]
test_suite = f"test-{suite_folder}-{test_script_without_extension}"
test_suites.append(test_suite)
return test_suites
def main():
test_suites = collect_test_suites_from_codeceptjs_dryrun()
for ts in test_suites:
if ts not in test_suites_that_cant_run_in_parallel:
print(ts)
# print(f"## ## test_suites: {test_suites}")
# print(f"## test_suites size: {len(test_suites)}")
if __name__ == "__main__":
main()
| 49.032258
| 112
| 0.608114
| 425
| 4,560
| 6.36
| 0.322353
| 0.085831
| 0.103589
| 0.124306
| 0.332963
| 0.076952
| 0.022937
| 0
| 0
| 0
| 0
| 0.002779
| 0.289912
| 4,560
| 92
| 113
| 49.565217
| 0.831995
| 0.228947
| 0
| 0
| 0
| 0
| 0.394965
| 0.359375
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028169
| false
| 0
| 0.028169
| 0
| 0.070423
| 0.014085
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a7597366e4cc059f4fa32bc7a905bc75e50266d
| 1,045
|
py
|
Python
|
querybook/server/lib/query_executor/all_executors.py
|
set5think/querybook
|
25738fe113faa8ee414826d1aa910354ae8a4146
|
[
"Apache-2.0"
] | 1
|
2021-04-01T15:30:11.000Z
|
2021-04-01T15:30:11.000Z
|
querybook/server/lib/query_executor/all_executors.py
|
set5think/querybook
|
25738fe113faa8ee414826d1aa910354ae8a4146
|
[
"Apache-2.0"
] | null | null | null |
querybook/server/lib/query_executor/all_executors.py
|
set5think/querybook
|
25738fe113faa8ee414826d1aa910354ae8a4146
|
[
"Apache-2.0"
] | 1
|
2021-04-02T17:43:41.000Z
|
2021-04-02T17:43:41.000Z
|
from lib.utils.plugin import import_plugin
from .base_executor import parse_exception
from .executors.hive import HiveQueryExecutor
from .executors.presto import PrestoQueryExecutor
from .executors.sqlalchemy import (
MysqlQueryExecutor,
DruidQueryExecutor,
SqliteQueryExecutor,
SnowflakeQueryExecutor,
)
from .executors.bigquery import BigQueryQueryExecutor
ALL_PLUGIN_EXECUTORS = import_plugin("executor_plugin", "ALL_PLUGIN_EXECUTORS", [])
ALL_EXECUTORS = [
HiveQueryExecutor,
PrestoQueryExecutor,
MysqlQueryExecutor,
DruidQueryExecutor,
SqliteQueryExecutor,
BigQueryQueryExecutor,
SnowflakeQueryExecutor,
] + ALL_PLUGIN_EXECUTORS
def get_executor_class(language: str, name: str):
for executor in ALL_EXECUTORS:
if (
executor.EXECUTOR_LANGUAGE() == language
and executor.EXECUTOR_NAME() == name
):
return executor
raise ValueError(f"Unknown executor {name} with language {language}")
# Re-export parse_exception
parse_exception
| 24.880952
| 83
| 0.749282
| 98
| 1,045
| 7.795918
| 0.408163
| 0.068063
| 0.070681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184689
| 1,045
| 41
| 84
| 25.487805
| 0.896714
| 0.023923
| 0
| 0.266667
| 0
| 0
| 0.081532
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.233333
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a7705eb0f14b8b24300a2a99d4b3ece8aed7a37
| 3,189
|
py
|
Python
|
bot/exts/info/pypi.py
|
MrGrote/bot
|
acaae30d1c6d401d383e3c1cc55dd1c19ced32c3
|
[
"MIT"
] | 1
|
2022-03-08T07:10:30.000Z
|
2022-03-08T07:10:30.000Z
|
bot/exts/info/pypi.py
|
MrGrote/bot
|
acaae30d1c6d401d383e3c1cc55dd1c19ced32c3
|
[
"MIT"
] | null | null | null |
bot/exts/info/pypi.py
|
MrGrote/bot
|
acaae30d1c6d401d383e3c1cc55dd1c19ced32c3
|
[
"MIT"
] | null | null | null |
import itertools
import random
import re
from contextlib import suppress
from disnake import Embed, NotFound
from disnake.ext.commands import Cog, Context, command
from disnake.utils import escape_markdown
from bot.bot import Bot
from bot.constants import Colours, NEGATIVE_REPLIES, RedirectOutput
from bot.log import get_logger
from bot.utils.messages import wait_for_deletion
URL = "https://pypi.org/pypi/{package}/json"
PYPI_ICON = "https://cdn.discordapp.com/emojis/766274397257334814.png"
PYPI_COLOURS = itertools.cycle((Colours.yellow, Colours.blue, Colours.white))
ILLEGAL_CHARACTERS = re.compile(r"[^-_.a-zA-Z0-9]+")
INVALID_INPUT_DELETE_DELAY = RedirectOutput.delete_delay
log = get_logger(__name__)
class PyPi(Cog):
"""Cog for getting information about PyPi packages."""
def __init__(self, bot: Bot):
self.bot = bot
@command(name="pypi", aliases=("package", "pack", "pip"))
async def get_package_info(self, ctx: Context, package: str) -> None:
"""Provide information about a specific package from PyPI."""
embed = Embed(title=random.choice(NEGATIVE_REPLIES), colour=Colours.soft_red)
embed.set_thumbnail(url=PYPI_ICON)
error = True
if characters := re.search(ILLEGAL_CHARACTERS, package):
embed.description = f"Illegal character(s) passed into command: '{escape_markdown(characters.group(0))}'"
else:
async with self.bot.http_session.get(URL.format(package=package)) as response:
if response.status == 404:
embed.description = "Package could not be found."
elif response.status == 200 and response.content_type == "application/json":
response_json = await response.json()
info = response_json["info"]
embed.title = f"{info['name']} v{info['version']}"
embed.url = info["package_url"]
embed.colour = next(PYPI_COLOURS)
summary = escape_markdown(info["summary"])
# Summary could be completely empty, or just whitespace.
if summary and not summary.isspace():
embed.description = summary
else:
embed.description = "No summary provided."
error = False
else:
embed.description = "There was an error when fetching your PyPi package."
log.trace(f"Error when fetching PyPi package: {response.status}.")
if error:
error_message = await ctx.send(embed=embed)
await wait_for_deletion(error_message, (ctx.author.id,), timeout=INVALID_INPUT_DELETE_DELAY)
# Make sure that we won't cause a ghost-ping by deleting the message
if not (ctx.message.mentions or ctx.message.role_mentions):
with suppress(NotFound):
await ctx.message.delete()
await error_message.delete()
else:
await ctx.send(embed=embed)
def setup(bot: Bot) -> None:
"""Load the PyPi cog."""
bot.add_cog(PyPi(bot))
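
For reference, the same PyPI JSON endpoint the cog queries can be hit synchronously; the sketch below is not part of the cog, assumes the requests package is installed, and uses an arbitrary example package name. It prints the info fields the embed is built from.

# Standalone sketch: the PyPI JSON endpoint queried synchronously.
import requests

URL = "https://pypi.org/pypi/{package}/json"

resp = requests.get(URL.format(package="requests"), timeout=10)
if resp.status_code == 200:
    info = resp.json()["info"]
    print(f"{info['name']} v{info['version']}: {info['summary']}")
elif resp.status_code == 404:
    print("Package could not be found.")
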
| 36.238636
| 117
| 0.625902
| 378
| 3,189
| 5.156085
| 0.42328
| 0.041047
| 0.015393
| 0.023602
| 0.022576
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011663
| 0.274067
| 3,189
| 87
| 118
| 36.655172
| 0.830238
| 0.05958
| 0
| 0.070175
| 0
| 0
| 0.146617
| 0.013671
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0.017544
| 0.192982
| 0
| 0.245614
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a79b3e5980fa9ccf32a0d7267aad362eafb93af
| 39,389
|
py
|
Python
|
test/dialect/mssql/test_compiler.py
|
gujun4990/sqlalchemy
|
057bae2295feb86529a04f09cd2f3d4c2c6d88a8
|
[
"MIT"
] | 1
|
2018-11-15T16:02:17.000Z
|
2018-11-15T16:02:17.000Z
|
test/dialect/mssql/test_compiler.py
|
gujun4990/sqlalchemy
|
057bae2295feb86529a04f09cd2f3d4c2c6d88a8
|
[
"MIT"
] | null | null | null |
test/dialect/mssql/test_compiler.py
|
gujun4990/sqlalchemy
|
057bae2295feb86529a04f09cd2f3d4c2c6d88a8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from sqlalchemy.testing import eq_, is_
from sqlalchemy import schema
from sqlalchemy.sql import table, column, quoted_name
from sqlalchemy.dialects import mssql
from sqlalchemy.dialects.mssql import mxodbc
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import sql
from sqlalchemy import Integer, String, Table, Column, select, MetaData,\
update, delete, insert, extract, union, func, PrimaryKeyConstraint, \
UniqueConstraint, Index, Sequence, literal
from sqlalchemy import testing
from sqlalchemy.dialects.mssql import base
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = mssql.dialect()
def test_true_false(self):
self.assert_compile(
sql.false(), "0"
)
self.assert_compile(
sql.true(),
"1"
)
def test_select(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.select(),
'SELECT sometable.somecolumn FROM sometable')
def test_select_with_nolock(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.select().with_hint(t, 'WITH (NOLOCK)'),
'SELECT sometable.somecolumn FROM sometable WITH (NOLOCK)')
def test_select_with_nolock_schema(self):
m = MetaData()
t = Table('sometable', m, Column('somecolumn', Integer),
schema='test_schema')
self.assert_compile(
t.select().with_hint(t, 'WITH (NOLOCK)'),
'SELECT test_schema.sometable.somecolumn '
'FROM test_schema.sometable WITH (NOLOCK)')
def test_select_w_order_by_collate(self):
m = MetaData()
t = Table('sometable', m, Column('somecolumn', String))
self.assert_compile(
select([t]).
order_by(
t.c.somecolumn.collate("Latin1_General_CS_AS_KS_WS_CI").asc()),
"SELECT sometable.somecolumn FROM sometable "
"ORDER BY sometable.somecolumn COLLATE "
"Latin1_General_CS_AS_KS_WS_CI ASC"
)
def test_join_with_hint(self):
t1 = table('t1',
column('a', Integer),
column('b', String),
column('c', String),
)
t2 = table('t2',
column("a", Integer),
column("b", Integer),
column("c", Integer),
)
join = t1.join(t2, t1.c.a == t2.c.a).\
select().with_hint(t1, 'WITH (NOLOCK)')
self.assert_compile(
join,
'SELECT t1.a, t1.b, t1.c, t2.a, t2.b, t2.c '
'FROM t1 WITH (NOLOCK) JOIN t2 ON t1.a = t2.a'
)
def test_insert(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.insert(),
'INSERT INTO sometable (somecolumn) VALUES '
'(:somecolumn)')
def test_update(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.update(t.c.somecolumn == 7),
'UPDATE sometable SET somecolumn=:somecolum'
'n WHERE sometable.somecolumn = '
':somecolumn_1', dict(somecolumn=10))
def test_insert_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.insert().
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"INSERT INTO sometable WITH (PAGLOCK) "
"(somecolumn) VALUES (:somecolumn)"
)
def test_update_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.update().where(t.c.somecolumn == "q").
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"UPDATE sometable WITH (PAGLOCK) "
"SET somecolumn=:somecolumn "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_update_exclude_hint(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.update().where(t.c.somecolumn == "q").
values(somecolumn="x").
with_hint("XYZ", "mysql"),
"UPDATE sometable SET somecolumn=:somecolumn "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_delete_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.delete().where(t.c.somecolumn == "q").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"DELETE FROM sometable WITH (PAGLOCK) "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_delete_exclude_hint(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.delete().
where(t.c.somecolumn == "q").
with_hint("XYZ", dialect_name="mysql"),
"DELETE FROM sometable WHERE "
"sometable.somecolumn = :somecolumn_1"
)
def test_delete_extra_froms(self):
t1 = table('t1', column('c1'))
t2 = table('t2', column('c1'))
q = sql.delete(t1).where(t1.c.c1 == t2.c.c1)
self.assert_compile(
q, "DELETE FROM t1 FROM t1, t2 WHERE t1.c1 = t2.c1"
)
def test_delete_extra_froms_alias(self):
a1 = table('t1', column('c1')).alias('a1')
t2 = table('t2', column('c1'))
q = sql.delete(a1).where(a1.c.c1 == t2.c.c1)
self.assert_compile(
q, "DELETE FROM a1 FROM t1 AS a1, t2 WHERE a1.c1 = t2.c1"
)
self.assert_compile(sql.delete(a1), "DELETE FROM t1 AS a1")
def test_update_from_hint(self):
t = table('sometable', column('somecolumn'))
t2 = table('othertable', column('somecolumn'))
for darg in ("*", "mssql"):
self.assert_compile(
t.update().where(t.c.somecolumn == t2.c.somecolumn).
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=t2,
dialect_name=darg),
"UPDATE sometable SET somecolumn=:somecolumn "
"FROM sometable, othertable WITH (PAGLOCK) "
"WHERE sometable.somecolumn = othertable.somecolumn"
)
def test_update_to_select_schema(self):
meta = MetaData()
table = Table(
"sometable", meta,
Column("sym", String),
Column("val", Integer),
schema="schema"
)
other = Table(
"#other", meta,
Column("sym", String),
Column("newval", Integer)
)
stmt = table.update().values(
val=select([other.c.newval]).
where(table.c.sym == other.c.sym).as_scalar())
self.assert_compile(
stmt,
"UPDATE [schema].sometable SET val="
"(SELECT [#other].newval FROM [#other] "
"WHERE [schema].sometable.sym = [#other].sym)",
)
stmt = table.update().values(val=other.c.newval).\
where(table.c.sym == other.c.sym)
self.assert_compile(
stmt,
"UPDATE [schema].sometable SET val="
"[#other].newval FROM [schema].sometable, "
"[#other] WHERE [schema].sometable.sym = [#other].sym",
)
# TODO: not supported yet.
# def test_delete_from_hint(self):
# t = table('sometable', column('somecolumn'))
# t2 = table('othertable', column('somecolumn'))
# for darg in ("*", "mssql"):
# self.assert_compile(
# t.delete().where(t.c.somecolumn==t2.c.somecolumn).
# with_hint("WITH (PAGLOCK)",
# selectable=t2,
# dialect_name=darg),
# ""
# )
def test_strict_binds(self):
"""test the 'strict' compiler binds."""
from sqlalchemy.dialects.mssql.base import MSSQLStrictCompiler
mxodbc_dialect = mxodbc.dialect()
mxodbc_dialect.statement_compiler = MSSQLStrictCompiler
t = table('sometable', column('foo'))
for expr, compile in [
(
select([literal("x"), literal("y")]),
"SELECT 'x' AS anon_1, 'y' AS anon_2",
),
(
select([t]).where(t.c.foo.in_(['x', 'y', 'z'])),
"SELECT sometable.foo FROM sometable WHERE sometable.foo "
"IN ('x', 'y', 'z')",
),
(
t.c.foo.in_([None]),
"sometable.foo IN (NULL)"
)
]:
self.assert_compile(expr, compile, dialect=mxodbc_dialect)
def test_in_with_subqueries(self):
"""Test removal of legacy behavior that converted "x==subquery"
to use IN.
"""
t = table('sometable', column('somecolumn'))
self.assert_compile(t.select().where(t.c.somecolumn
== t.select()),
'SELECT sometable.somecolumn FROM '
'sometable WHERE sometable.somecolumn = '
'(SELECT sometable.somecolumn FROM '
'sometable)')
self.assert_compile(t.select().where(t.c.somecolumn
!= t.select()),
'SELECT sometable.somecolumn FROM '
'sometable WHERE sometable.somecolumn != '
'(SELECT sometable.somecolumn FROM '
'sometable)')
@testing.uses_deprecated
def test_count(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.count(),
'SELECT count(sometable.somecolumn) AS '
'tbl_row_count FROM sometable')
def test_noorderby_insubquery(self):
"""test that the ms-sql dialect removes ORDER BY clauses from
subqueries"""
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
q = select([table1.c.myid],
order_by=[table1.c.myid]).alias('foo')
crit = q.c.myid == table1.c.myid
self.assert_compile(select(['*'], crit),
"SELECT * FROM (SELECT mytable.myid AS "
"myid FROM mytable) AS foo, mytable WHERE "
"foo.myid = mytable.myid")
def test_force_schema_quoted_name_w_dot_case_insensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema=quoted_name("foo.dbo", True)
)
self.assert_compile(
select([tbl]),
"SELECT [foo.dbo].test.id FROM [foo.dbo].test"
)
def test_force_schema_quoted_w_dot_case_insensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema=quoted_name("foo.dbo", True)
)
self.assert_compile(
select([tbl]),
"SELECT [foo.dbo].test.id FROM [foo.dbo].test"
)
def test_force_schema_quoted_name_w_dot_case_sensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema=quoted_name("Foo.dbo", True)
)
self.assert_compile(
select([tbl]),
"SELECT [Foo.dbo].test.id FROM [Foo.dbo].test"
)
def test_force_schema_quoted_w_dot_case_sensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema="[Foo.dbo]"
)
self.assert_compile(
select([tbl]),
"SELECT [Foo.dbo].test.id FROM [Foo.dbo].test"
)
def test_schema_autosplit_w_dot_case_insensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema="foo.dbo"
)
self.assert_compile(
select([tbl]),
"SELECT foo.dbo.test.id FROM foo.dbo.test"
)
def test_schema_autosplit_w_dot_case_sensitive(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema="Foo.dbo"
)
self.assert_compile(
select([tbl]),
"SELECT [Foo].dbo.test.id FROM [Foo].dbo.test"
)
def test_owner_database_pairs(self):
dialect = mssql.dialect()
for identifier, expected_schema, expected_owner in [
("foo", None, "foo"),
("foo.bar", "foo", "bar"),
("Foo.Bar", "Foo", "Bar"),
("[Foo.Bar]", None, "Foo.Bar"),
("[Foo.Bar].[bat]", "Foo.Bar", "bat"),
]:
schema, owner = base._owner_plus_db(dialect, identifier)
eq_(owner, expected_owner)
eq_(schema, expected_schema)
def test_delete_schema(self):
metadata = MetaData()
tbl = Table('test', metadata, Column('id', Integer,
primary_key=True), schema='paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM paj.test WHERE paj.test.id = '
':id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM paj.test WHERE paj.test.id IN '
'(SELECT paj.test.id FROM paj.test '
'WHERE paj.test.id = :id_1)')
def test_delete_schema_multipart(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer,
primary_key=True),
schema='banana.paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM banana.paj.test WHERE '
'banana.paj.test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM banana.paj.test WHERE '
'banana.paj.test.id IN (SELECT banana.paj.test.id '
'FROM banana.paj.test WHERE '
'banana.paj.test.id = :id_1)')
def test_delete_schema_multipart_needs_quoting(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema='banana split.paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM [banana split].paj.test WHERE '
'[banana split].paj.test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM [banana split].paj.test WHERE '
'[banana split].paj.test.id IN ('
'SELECT [banana split].paj.test.id FROM '
'[banana split].paj.test WHERE '
'[banana split].paj.test.id = :id_1)')
def test_delete_schema_multipart_both_need_quoting(self):
metadata = MetaData()
tbl = Table('test', metadata, Column('id', Integer,
primary_key=True),
schema='banana split.paj with a space')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM [banana split].[paj with a '
'space].test WHERE [banana split].[paj '
'with a space].test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(
tbl.delete().where(tbl.c.id.in_(s)),
"DELETE FROM [banana split].[paj with a space].test "
"WHERE [banana split].[paj with a space].test.id IN "
"(SELECT [banana split].[paj with a space].test.id "
"FROM [banana split].[paj with a space].test "
"WHERE [banana split].[paj with a space].test.id = :id_1)"
)
def test_union(self):
t1 = table(
't1', column('col1'), column('col2'),
column('col3'), column('col4'))
t2 = table(
't2', column('col1'), column('col2'),
column('col3'), column('col4'))
s1, s2 = select(
[t1.c.col3.label('col3'), t1.c.col4.label('col4')],
t1.c.col2.in_(['t1col2r1', 't1col2r2'])), \
select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
t2.c.col2.in_(['t2col2r2', 't2col2r3']))
u = union(s1, s2, order_by=['col3', 'col4'])
self.assert_compile(u,
'SELECT t1.col3 AS col3, t1.col4 AS col4 '
'FROM t1 WHERE t1.col2 IN (:col2_1, '
':col2_2) UNION SELECT t2.col3 AS col3, '
't2.col4 AS col4 FROM t2 WHERE t2.col2 IN '
'(:col2_3, :col2_4) ORDER BY col3, col4')
self.assert_compile(u.alias('bar').select(),
'SELECT bar.col3, bar.col4 FROM (SELECT '
't1.col3 AS col3, t1.col4 AS col4 FROM t1 '
'WHERE t1.col2 IN (:col2_1, :col2_2) UNION '
'SELECT t2.col3 AS col3, t2.col4 AS col4 '
'FROM t2 WHERE t2.col2 IN (:col2_3, '
':col2_4)) AS bar')
def test_function(self):
self.assert_compile(func.foo(1, 2), 'foo(:foo_1, :foo_2)')
self.assert_compile(func.current_time(), 'CURRENT_TIME')
self.assert_compile(func.foo(), 'foo()')
m = MetaData()
t = Table(
'sometable', m, Column('col1', Integer), Column('col2', Integer))
self.assert_compile(select([func.max(t.c.col1)]),
'SELECT max(sometable.col1) AS max_1 FROM '
'sometable')
def test_function_overrides(self):
self.assert_compile(func.current_date(), "GETDATE()")
self.assert_compile(func.length(3), "LEN(:length_1)")
def test_extract(self):
t = table('t', column('col1'))
for field in 'day', 'month', 'year':
self.assert_compile(
select([extract(field, t.c.col1)]),
'SELECT DATEPART(%s, t.col1) AS anon_1 FROM t' % field)
def test_update_returning(self):
table1 = table(
'mytable',
column('myid', Integer),
column('name', String(128)),
column('description', String(128)))
u = update(
table1,
values=dict(name='foo')).returning(table1.c.myid, table1.c.name)
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'inserted.myid, inserted.name')
u = update(table1, values=dict(name='foo')).returning(table1)
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'inserted.myid, inserted.name, '
'inserted.description')
u = update(
table1,
values=dict(
name='foo')).returning(table1).where(table1.c.name == 'bar')
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'inserted.myid, inserted.name, '
'inserted.description WHERE mytable.name = '
':name_1')
u = update(table1, values=dict(name='foo'
)).returning(func.length(table1.c.name))
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'LEN(inserted.name) AS length_1')
def test_delete_returning(self):
table1 = table(
'mytable', column('myid', Integer),
column('name', String(128)), column('description', String(128)))
d = delete(table1).returning(table1.c.myid, table1.c.name)
self.assert_compile(d,
'DELETE FROM mytable OUTPUT deleted.myid, '
'deleted.name')
d = delete(table1).where(table1.c.name == 'bar'
).returning(table1.c.myid,
table1.c.name)
self.assert_compile(d,
'DELETE FROM mytable OUTPUT deleted.myid, '
'deleted.name WHERE mytable.name = :name_1')
def test_insert_returning(self):
table1 = table(
'mytable', column('myid', Integer),
column('name', String(128)), column('description', String(128)))
i = insert(
table1,
values=dict(name='foo')).returning(table1.c.myid, table1.c.name)
self.assert_compile(i,
'INSERT INTO mytable (name) OUTPUT '
'inserted.myid, inserted.name VALUES '
'(:name)')
i = insert(table1, values=dict(name='foo')).returning(table1)
self.assert_compile(i,
'INSERT INTO mytable (name) OUTPUT '
'inserted.myid, inserted.name, '
'inserted.description VALUES (:name)')
i = insert(table1, values=dict(name='foo'
)).returning(func.length(table1.c.name))
self.assert_compile(i,
'INSERT INTO mytable (name) OUTPUT '
'LEN(inserted.name) AS length_1 VALUES '
'(:name)')
def test_limit_using_top(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).limit(10)
self.assert_compile(
s,
"SELECT TOP 10 t.x, t.y FROM t WHERE t.x = :x_1 ORDER BY t.y",
checkparams={'x_1': 5}
)
def test_limit_zero_using_top(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).limit(0)
self.assert_compile(
s,
"SELECT TOP 0 t.x, t.y FROM t WHERE t.x = :x_1 ORDER BY t.y",
checkparams={'x_1': 5}
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 2)
assert t.c.x in set(c._create_result_map()['x'][1])
def test_offset_using_window(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).offset(20)
# test that the select is not altered with subsequent compile
# calls
for i in range(2):
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y FROM (SELECT t.x AS x, t.y "
"AS y, ROW_NUMBER() OVER (ORDER BY t.y) AS "
"mssql_rn FROM t WHERE t.x = :x_1) AS "
"anon_1 WHERE mssql_rn > :param_1",
checkparams={'param_1': 20, 'x_1': 5}
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 2)
assert t.c.x in set(c._create_result_map()['x'][1])
def test_limit_offset_using_window(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).limit(10).offset(20)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y "
"FROM (SELECT t.x AS x, t.y AS y, "
"ROW_NUMBER() OVER (ORDER BY t.y) AS mssql_rn "
"FROM t "
"WHERE t.x = :x_1) AS anon_1 "
"WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
checkparams={'param_1': 20, 'param_2': 10, 'x_1': 5}
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 2)
assert t.c.x in set(c._create_result_map()['x'][1])
assert t.c.y in set(c._create_result_map()['y'][1])
def test_limit_offset_w_ambiguous_cols(self):
t = table('t', column('x', Integer), column('y', Integer))
cols = [t.c.x, t.c.x.label('q'), t.c.x.label('p'), t.c.y]
s = select(cols).where(t.c.x == 5).order_by(t.c.y).limit(10).offset(20)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.q, anon_1.p, anon_1.y "
"FROM (SELECT t.x AS x, t.x AS q, t.x AS p, t.y AS y, "
"ROW_NUMBER() OVER (ORDER BY t.y) AS mssql_rn "
"FROM t "
"WHERE t.x = :x_1) AS anon_1 "
"WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
checkparams={'param_1': 20, 'param_2': 10, 'x_1': 5}
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 4)
result_map = c._create_result_map()
for col in cols:
is_(result_map[col.key][1][0], col)
def test_limit_offset_with_correlated_order_by(self):
t1 = table('t1', column('x', Integer), column('y', Integer))
t2 = table('t2', column('x', Integer), column('y', Integer))
order_by = select([t2.c.y]).where(t1.c.x == t2.c.x).as_scalar()
s = select([t1]).where(t1.c.x == 5).order_by(order_by) \
.limit(10).offset(20)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y "
"FROM (SELECT t1.x AS x, t1.y AS y, "
"ROW_NUMBER() OVER (ORDER BY "
"(SELECT t2.y FROM t2 WHERE t1.x = t2.x)"
") AS mssql_rn "
"FROM t1 "
"WHERE t1.x = :x_1) AS anon_1 "
"WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
checkparams={'param_1': 20, 'param_2': 10, 'x_1': 5}
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 2)
assert t1.c.x in set(c._create_result_map()['x'][1])
assert t1.c.y in set(c._create_result_map()['y'][1])
def test_offset_dont_misapply_labelreference(self):
m = MetaData()
t = Table('t', m, Column('x', Integer))
expr1 = func.foo(t.c.x).label('x')
expr2 = func.foo(t.c.x).label('y')
stmt1 = select([expr1]).order_by(expr1.desc()).offset(1)
stmt2 = select([expr2]).order_by(expr2.desc()).offset(1)
self.assert_compile(
stmt1,
"SELECT anon_1.x FROM (SELECT foo(t.x) AS x, "
"ROW_NUMBER() OVER (ORDER BY foo(t.x) DESC) AS mssql_rn FROM t) "
"AS anon_1 WHERE mssql_rn > :param_1"
)
self.assert_compile(
stmt2,
"SELECT anon_1.y FROM (SELECT foo(t.x) AS y, "
"ROW_NUMBER() OVER (ORDER BY foo(t.x) DESC) AS mssql_rn FROM t) "
"AS anon_1 WHERE mssql_rn > :param_1"
)
def test_limit_zero_offset_using_window(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).limit(0).offset(0)
# render the LIMIT of zero, but not the OFFSET
# of zero, so produces TOP 0
self.assert_compile(
s,
"SELECT TOP 0 t.x, t.y FROM t "
"WHERE t.x = :x_1 ORDER BY t.y",
checkparams={'x_1': 5}
)
def test_primary_key_no_identity(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, autoincrement=False,
primary_key=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL, "
"PRIMARY KEY (id))"
)
def test_primary_key_defaults_to_identity(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, primary_key=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,1), "
"PRIMARY KEY (id))"
)
def test_identity_no_primary_key(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, autoincrement=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,1)"
")"
)
def test_identity_separate_from_primary_key(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, autoincrement=False,
primary_key=True),
Column('x', Integer, autoincrement=True)
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL, "
"x INTEGER NOT NULL IDENTITY(1,1), "
"PRIMARY KEY (id))"
)
def test_identity_illegal_two_autoincrements(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, autoincrement=True),
Column('id2', Integer, autoincrement=True),
)
# this will be rejected by the database, just asserting this is what
# the two autoincrements will do right now
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,1), "
"id2 INTEGER NOT NULL IDENTITY(1,1))"
)
def test_identity_start_0(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, mssql_identity_start=0,
primary_key=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(0,1), "
"PRIMARY KEY (id))"
)
def test_identity_increment_5(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, mssql_identity_increment=5,
primary_key=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,5), "
"PRIMARY KEY (id))"
)
def test_sequence_start_0(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, Sequence('', 0), primary_key=True))
with testing.expect_deprecated(
"Use of Sequence with SQL Server in order to affect "):
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(0,1), "
"PRIMARY KEY (id))"
)
def test_sequence_non_primary_key(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, Sequence('', start=5),
primary_key=False))
with testing.expect_deprecated(
"Use of Sequence with SQL Server in order to affect "):
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(5,1))"
)
def test_sequence_ignore_nullability(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, Sequence('', start=5),
nullable=True))
with testing.expect_deprecated(
"Use of Sequence with SQL Server in order to affect "):
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(5,1))"
)
def test_table_pkc_clustering(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('x', Integer, autoincrement=False),
Column('y', Integer, autoincrement=False),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NOT NULL, y INTEGER NOT NULL, "
"PRIMARY KEY CLUSTERED (x, y))"
)
def test_table_pkc_explicit_nonclustered(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('x', Integer, autoincrement=False),
Column('y', Integer, autoincrement=False),
PrimaryKeyConstraint("x", "y", mssql_clustered=False))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NOT NULL, y INTEGER NOT NULL, "
"PRIMARY KEY NONCLUSTERED (x, y))"
)
def test_table_idx_explicit_nonclustered(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('x', Integer, autoincrement=False),
Column('y', Integer, autoincrement=False)
)
idx = Index("myidx", tbl.c.x, tbl.c.y, mssql_clustered=False)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE NONCLUSTERED INDEX myidx ON test (x, y)"
)
def test_table_uc_explicit_nonclustered(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('x', Integer, autoincrement=False),
Column('y', Integer, autoincrement=False),
UniqueConstraint("x", "y", mssql_clustered=False))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NULL, y INTEGER NULL, "
"UNIQUE NONCLUSTERED (x, y))"
)
def test_table_uc_clustering(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('x', Integer, autoincrement=False),
Column('y', Integer, autoincrement=False),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NOT NULL, y INTEGER NULL, "
"PRIMARY KEY (x), UNIQUE CLUSTERED (y))"
)
def test_index_clustering(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer))
idx = Index("foo", tbl.c.id, mssql_clustered=True)
self.assert_compile(schema.CreateIndex(idx),
"CREATE CLUSTERED INDEX foo ON test (id)"
)
def test_index_ordering(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('x', Integer), Column('y', Integer), Column('z', Integer))
idx = Index("foo", tbl.c.x.desc(), "y")
self.assert_compile(schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x DESC, y)"
)
def test_create_index_expr(self):
m = MetaData()
t1 = Table('foo', m,
Column('x', Integer)
)
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x > 5)),
"CREATE INDEX bar ON foo (x > 5)"
)
def test_drop_index_w_schema(self):
m = MetaData()
t1 = Table('foo', m,
Column('x', Integer),
schema='bar'
)
self.assert_compile(
schema.DropIndex(Index("idx_foo", t1.c.x)),
"DROP INDEX idx_foo ON bar.foo"
)
def test_index_extra_include_1(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('x', Integer), Column('y', Integer), Column('z', Integer))
idx = Index("foo", tbl.c.x, mssql_include=['y'])
self.assert_compile(schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x) INCLUDE (y)"
)
def test_index_extra_include_2(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('x', Integer), Column('y', Integer), Column('z', Integer))
idx = Index("foo", tbl.c.x, mssql_include=[tbl.c.y])
self.assert_compile(schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x) INCLUDE (y)"
)
class SchemaTest(fixtures.TestBase):
def setup(self):
t = Table('sometable', MetaData(),
Column('pk_column', Integer),
Column('test_column', String)
)
self.column = t.c.test_column
dialect = mssql.dialect()
self.ddl_compiler = dialect.ddl_compiler(dialect,
schema.CreateTable(t))
def _column_spec(self):
return self.ddl_compiler.get_column_specification(self.column)
def test_that_mssql_default_nullability_emits_null(self):
eq_("test_column VARCHAR(max) NULL", self._column_spec())
def test_that_mssql_none_nullability_does_not_emit_nullability(self):
self.column.nullable = None
eq_("test_column VARCHAR(max)", self._column_spec())
def test_that_mssql_specified_nullable_emits_null(self):
self.column.nullable = True
eq_("test_column VARCHAR(max) NULL", self._column_spec())
def test_that_mssql_specified_not_nullable_emits_not_null(self):
self.column.nullable = False
eq_("test_column VARCHAR(max) NOT NULL", self._column_spec())
| 39.193035
| 79
| 0.514991
| 4,443
| 39,389
| 4.421337
| 0.069998
| 0.044797
| 0.076156
| 0.033954
| 0.744502
| 0.691305
| 0.665496
| 0.650784
| 0.6208
| 0.592344
| 0
| 0.017902
| 0.357562
| 39,389
| 1,004
| 80
| 39.232072
| 0.758388
| 0.02229
| 0
| 0.445349
| 0
| 0.004651
| 0.22543
| 0.007542
| 0
| 0
| 0
| 0.000996
| 0.110465
| 1
| 0.086047
| false
| 0
| 0.012791
| 0.001163
| 0.103488
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a7c6f49614d6822678c761e9a25fddc34bcb0a8
| 818
|
py
|
Python
|
SC101Lecture_code/SC101_week4/draw_basic.py
|
Jewel-Hong/SC-projects
|
9502b3f0c789a931226d4ce0200ccec56e47bc14
|
[
"MIT"
] | null | null | null |
SC101Lecture_code/SC101_week4/draw_basic.py
|
Jewel-Hong/SC-projects
|
9502b3f0c789a931226d4ce0200ccec56e47bc14
|
[
"MIT"
] | null | null | null |
SC101Lecture_code/SC101_week4/draw_basic.py
|
Jewel-Hong/SC-projects
|
9502b3f0c789a931226d4ce0200ccec56e47bc14
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Stanford CS106AP
TK Drawing Lecture Exercises
Courtesy of Nick Parlante
"""
import tkinter as tk
# provided function, this code is complete
def make_canvas(width, height):
"""
Creates and returns a drawing canvas
of the given int size, ready for drawing.
"""
top = tk.Tk()
top.minsize(width=width + 10, height=height + 10)
canvas = tk.Canvas(top, width=width, height=height)
canvas.pack()
canvas.xview_scroll(6, "units") # hack so (0, 0) works correctly
canvas.yview_scroll(6, "units")
return canvas
def main():
w = make_canvas(1000, 500)
w.create_line(0, 0, 1000, 500, width=5, fill='red')
w.create_text(0, 0, text='SC101', anchor=tk.NW, font='times 80')
tk.mainloop()  # tell the computer not to close the window
if __name__ == '__main__':
main()
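# A small extension sketch (coordinates and colors are illustrative): the
# canvas returned by make_canvas supports the other standard Tk primitives,
# e.g. inside main():
# w.create_rectangle(50, 50, 250, 150, outline='blue', width=3)
# w.create_oval(300, 100, 500, 300, fill='green')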
| 21.526316
| 69
| 0.656479
| 120
| 818
| 4.358333
| 0.608333
| 0.011472
| 0.045889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055728
| 0.210269
| 818
| 37
| 70
| 22.108108
| 0.75387
| 0.310513
| 0
| 0
| 0
| 0
| 0.06367
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a7ce8d41f3884cc3735bd20c347dfb81bcc70b3
| 2,714
|
py
|
Python
|
torchvision/datasets/kinetics.py
|
sh1doy/vision
|
d7dce1034a0682bf8832bc89cda9589d6598087d
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/datasets/kinetics.py
|
sh1doy/vision
|
d7dce1034a0682bf8832bc89cda9589d6598087d
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/datasets/kinetics.py
|
sh1doy/vision
|
d7dce1034a0682bf8832bc89cda9589d6598087d
|
[
"BSD-3-Clause"
] | null | null | null |
from .video_utils import VideoClips
from .utils import list_dir
from .folder import make_dataset
from .vision import VisionDataset
class Kinetics400(VisionDataset):
"""
`Kinetics-400 <https://deepmind.com/research/open-source/open-source-datasets/kinetics/>`_
dataset.
Kinetics-400 is an action recognition video dataset.
This dataset considers every video as a collection of video clips of fixed size, specified
by ``frames_per_clip``, where the step in frames between each clip is given by
``step_between_clips``.
To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
elements will come from video 1, and the next three elements from video 2.
Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
frames in a video might be present.
Internally, it uses a VideoClips object to handle clip creation.
Args:
root (string): Root directory of the Kinetics-400 Dataset.
frames_per_clip (int): number of frames in a clip
step_between_clips (int): number of frames between each clip
transform (callable, optional): A function/transform that takes in a TxHxWxC video
and returns a transformed version.
Returns:
video (Tensor[T, H, W, C]): the `T` video frames
audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
and `L` is the number of points
label (int): class of the video clip
"""
def __init__(self, root, frames_per_clip, step_between_clips=1, frame_rate=None,
extensions=('avi',), transform=None, _precomputed_metadata=None):
super(Kinetics400, self).__init__(root)
extensions = ('avi',)
classes = list(sorted(list_dir(root)))
class_to_idx = {classes[i]: i for i in range(len(classes))}
self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
self.classes = classes
video_list = [x[0] for x in self.samples]
self.video_clips = VideoClips(
video_list,
frames_per_clip,
step_between_clips,
frame_rate,
_precomputed_metadata,
)
self.transform = transform
def __len__(self):
return self.video_clips.num_clips()
def __getitem__(self, idx):
video, audio, info, video_idx = self.video_clips.get_clip(idx)
label = self.samples[video_idx][1]
if self.transform is not None:
video = self.transform(video)
return video, audio, label
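# A minimal usage sketch (the dataset root below is a hypothetical local
# path). With frames_per_clip=5 and step_between_clips=5, the docstring's
# example of a 10-frame and a 15-frame video yields 2 + 3 = 5 clips:
#
# dataset = Kinetics400("/data/kinetics400/train", frames_per_clip=5,
#                       step_between_clips=5)
# video, audio, label = dataset[0]  # video is a Tensor[T, H, W, C] with T == 5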
| 39.333333
| 97
| 0.669123
| 380
| 2,714
| 4.607895
| 0.378947
| 0.03084
| 0.044546
| 0.034266
| 0.033124
| 0.033124
| 0
| 0
| 0
| 0
| 0
| 0.014699
| 0.247973
| 2,714
| 68
| 98
| 39.911765
| 0.843214
| 0.493736
| 0
| 0
| 0
| 0
| 0.004724
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.133333
| 0.033333
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a7e85c92ceca48b141eaf4f09d1496be103b6aa
| 10,795
|
py
|
Python
|
venv/lib/python2.7/site-packages/sphinx/builders/qthelp.py
|
CharleyFarley/ovvio
|
81489ee64f91e4aab908731ce6ddf59edb9314bf
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/sphinx/builders/qthelp.py
|
CharleyFarley/ovvio
|
81489ee64f91e4aab908731ce6ddf59edb9314bf
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/sphinx/builders/qthelp.py
|
CharleyFarley/ovvio
|
81489ee64f91e4aab908731ce6ddf59edb9314bf
|
[
"MIT"
] | 1
|
2016-08-24T01:08:34.000Z
|
2016-08-24T01:08:34.000Z
|
# -*- coding: utf-8 -*-
"""
sphinx.builders.qthelp
~~~~~~~~~~~~~~~~~~~~~~
Build input files for the Qt collection generator.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import codecs
import posixpath
from os import path
from six import text_type
from docutils import nodes
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util import force_decode
from sphinx.util.pycompat import htmlescape
_idpattern = re.compile(
r'(?P<title>.+) (\((class in )?(?P<id>[\w\.]+)( (?P<descr>\w+))?\))$')
# Qt Help Collection Project (.qhcp).
# Is the input file for the help collection generator.
# It contains references to compressed help files which should be
# included in the collection.
# It may contain various other information for customizing Qt Assistant.
collection_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QHelpCollectionProject version="1.0">
<assistant>
<title>%(title)s</title>
<homePage>%(homepage)s</homePage>
<startPage>%(startpage)s</startPage>
</assistant>
<docFiles>
<generate>
<file>
<input>%(outname)s.qhp</input>
<output>%(outname)s.qch</output>
</file>
</generate>
<register>
<file>%(outname)s.qch</file>
</register>
</docFiles>
</QHelpCollectionProject>
'''
# Qt Help Project (.qhp)
# This is the input file for the help generator.
# It contains the table of contents, indices and references to the
# actual documentation files (*.html).
# In addition it defines a unique namespace for the documentation.
project_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QtHelpProject version="1.0">
<namespace>%(namespace)s</namespace>
<virtualFolder>doc</virtualFolder>
<customFilter name="%(project)s %(version)s">
<filterAttribute>%(outname)s</filterAttribute>
<filterAttribute>%(version)s</filterAttribute>
</customFilter>
<filterSection>
<filterAttribute>%(outname)s</filterAttribute>
<filterAttribute>%(version)s</filterAttribute>
<toc>
<section title="%(title)s" ref="%(masterdoc)s.html">
%(sections)s
</section>
</toc>
<keywords>
%(keywords)s
</keywords>
<files>
%(files)s
</files>
</filterSection>
</QtHelpProject>
'''
section_template = '<section title="%(title)s" ref="%(ref)s"/>'
file_template = ' '*12 + '<file>%(filename)s</file>'
class QtHelpBuilder(StandaloneHTMLBuilder):
"""
Builder that also outputs Qt help project, contents and index files.
"""
name = 'qthelp'
# don't copy the reST source
copysource = False
supported_image_types = ['image/svg+xml', 'image/png', 'image/gif',
'image/jpeg']
# don't add links
add_permalinks = False
# don't add sidebar etc.
embedded = True
def init(self):
StandaloneHTMLBuilder.init(self)
# the output files for HTML help must be .html only
self.out_suffix = '.html'
# self.config.html_style = 'traditional.css'
def handle_finish(self):
self.build_qhp(self.outdir, self.config.qthelp_basename)
def build_qhp(self, outdir, outname):
self.info('writing project file...')
# sections
tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self,
prune_toctrees=False)
def istoctree(node):
return isinstance(node, addnodes.compact_paragraph) and \
'toctree' in node
sections = []
for node in tocdoc.traverse(istoctree):
sections.extend(self.write_toc(node))
for indexname, indexcls, content, collapse in self.domain_indices:
item = section_template % {'title': indexcls.localname,
'ref': '%s.html' % indexname}
sections.append(' ' * 4 * 4 + item)
# sections may be unicode strings or byte strings, we have to make sure
# they are all unicode strings before joining them
new_sections = []
for section in sections:
if not isinstance(section, text_type):
new_sections.append(force_decode(section, None))
else:
new_sections.append(section)
sections = u'\n'.join(new_sections)
# keywords
keywords = []
index = self.env.create_index(self, group_entries=False)
for (key, group) in index:
for title, (refs, subitems, key_) in group:
keywords.extend(self.build_keywords(title, refs, subitems))
keywords = u'\n'.join(keywords)
# files
if not outdir.endswith(os.sep):
outdir += os.sep
olen = len(outdir)
projectfiles = []
staticdir = path.join(outdir, '_static')
imagesdir = path.join(outdir, self.imagedir)
for root, dirs, files in os.walk(outdir):
resourcedir = root.startswith(staticdir) or \
root.startswith(imagesdir)
for fn in files:
if (resourcedir and not fn.endswith('.js')) or \
fn.endswith('.html'):
filename = path.join(root, fn)[olen:]
projectfiles.append(file_template %
{'filename': htmlescape(filename)})
projectfiles = '\n'.join(projectfiles)
# it seems that the "namespace" may not contain non-alphanumeric
# characters, and more than one successive dot, or leading/trailing
# dots, are also forbidden
nspace = 'org.sphinx.%s.%s' % (outname, self.config.version)
nspace = re.sub('[^a-zA-Z0-9.]', '', nspace)
nspace = re.sub(r'\.+', '.', nspace).strip('.')
nspace = nspace.lower()
# write the project file
f = codecs.open(path.join(outdir, outname+'.qhp'), 'w', 'utf-8')
try:
f.write(project_template % {
'outname': htmlescape(outname),
'title': htmlescape(self.config.html_title),
'version': htmlescape(self.config.version),
'project': htmlescape(self.config.project),
'namespace': htmlescape(nspace),
'masterdoc': htmlescape(self.config.master_doc),
'sections': sections,
'keywords': keywords,
'files': projectfiles})
finally:
f.close()
homepage = 'qthelp://' + posixpath.join(
nspace, 'doc', self.get_target_uri(self.config.master_doc))
startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html')
self.info('writing collection project file...')
f = codecs.open(path.join(outdir, outname+'.qhcp'), 'w', 'utf-8')
try:
f.write(collection_template % {
'outname': htmlescape(outname),
'title': htmlescape(self.config.html_short_title),
'homepage': htmlescape(homepage),
'startpage': htmlescape(startpage)})
finally:
f.close()
def isdocnode(self, node):
if not isinstance(node, nodes.list_item):
return False
if len(node.children) != 2:
return False
if not isinstance(node.children[0], addnodes.compact_paragraph):
return False
if not isinstance(node.children[0][0], nodes.reference):
return False
if not isinstance(node.children[1], nodes.bullet_list):
return False
return True
def write_toc(self, node, indentlevel=4):
# XXX this should return a Unicode string, not a bytestring
parts = []
if self.isdocnode(node):
refnode = node.children[0][0]
link = refnode['refuri']
title = htmlescape(refnode.astext()).replace('"', '&quot;')
item = '<section title="%(title)s" ref="%(ref)s">' % \
{'title': title, 'ref': link}
parts.append(' '*4*indentlevel + item)
for subnode in node.children[1]:
parts.extend(self.write_toc(subnode, indentlevel+1))
parts.append(' '*4*indentlevel + '</section>')
elif isinstance(node, nodes.list_item):
for subnode in node:
parts.extend(self.write_toc(subnode, indentlevel))
elif isinstance(node, nodes.reference):
link = node['refuri']
title = htmlescape(node.astext()).replace('"', '&quot;')
item = section_template % {'title': title, 'ref': link}
item = u' ' * 4 * indentlevel + item
parts.append(item.encode('ascii', 'xmlcharrefreplace'))
elif isinstance(node, nodes.bullet_list):
for subnode in node:
parts.extend(self.write_toc(subnode, indentlevel))
elif isinstance(node, addnodes.compact_paragraph):
for subnode in node:
parts.extend(self.write_toc(subnode, indentlevel))
return parts
def keyword_item(self, name, ref):
matchobj = _idpattern.match(name)
if matchobj:
groupdict = matchobj.groupdict()
shortname = groupdict['title']
id = groupdict.get('id')
# descr = groupdict.get('descr')
if shortname.endswith('()'):
shortname = shortname[:-2]
id = '%s.%s' % (id, shortname)
else:
id = None
if id:
item = ' '*12 + '<keyword name="%s" id="%s" ref="%s"/>' % (
name, id, ref[1])
else:
item = ' '*12 + '<keyword name="%s" ref="%s"/>' % (name, ref[1])
item.encode('ascii', 'xmlcharrefreplace')
return item
def build_keywords(self, title, refs, subitems):
keywords = []
title = htmlescape(title)
# if len(refs) == 0: # XXX
# write_param('See Also', title)
if len(refs) == 1:
keywords.append(self.keyword_item(title, refs[0]))
elif len(refs) > 1:
for i, ref in enumerate(refs): # XXX
# item = (' '*12 +
# '<keyword name="%s [%d]" ref="%s"/>' % (
# title, i, ref))
# item.encode('ascii', 'xmlcharrefreplace')
# keywords.append(item)
keywords.append(self.keyword_item(title, ref))
if subitems:
for subitem in subitems:
keywords.extend(self.build_keywords(subitem[0], subitem[1], []))
return keywords
| 36.103679
| 80
| 0.568133
| 1,165
| 10,795
| 5.208584
| 0.254077
| 0.01648
| 0.01236
| 0.014832
| 0.224456
| 0.162162
| 0.146341
| 0.117172
| 0.079268
| 0.034113
| 0
| 0.00728
| 0.300139
| 10,795
| 298
| 81
| 36.224832
| 0.795897
| 0.145438
| 0
| 0.146789
| 0
| 0.004587
| 0.215371
| 0.058489
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036697
| false
| 0
| 0.050459
| 0.004587
| 0.16055
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a7edac2ed561ec67cb5f3e276d02750502435c8
| 7,252
|
py
|
Python
|
scripts/scrape_sciencedirect_urls.py
|
UWPRG/BETO2020
|
55b5b329395da79047e9083232101d15af9f2c49
|
[
"MIT"
] | 4
|
2020-03-04T21:08:11.000Z
|
2020-10-28T11:28:00.000Z
|
scripts/scrape_sciencedirect_urls.py
|
UWPRG/BETO2020
|
55b5b329395da79047e9083232101d15af9f2c49
|
[
"MIT"
] | null | null | null |
scripts/scrape_sciencedirect_urls.py
|
UWPRG/BETO2020
|
55b5b329395da79047e9083232101d15af9f2c49
|
[
"MIT"
] | 6
|
2019-04-15T16:51:16.000Z
|
2019-11-13T02:45:53.000Z
|
"""
This code is used to scrape ScienceDirect for publication urls and write them to
a text file in the current directory for later use.
"""
import selenium
from selenium import webdriver
import numpy as np
import pandas as pd
import bs4
from bs4 import BeautifulSoup
import time
from sklearn.utils import shuffle
def scrape_page(driver):
"""
This method finds all the publication result web elements on the webpage.
Parameters
----------
driver (Selenium webdriver object) : Instance of the webdriver class e.g.
webdriver.Chrome()
Returns
-------
elems (list) : A list of all scraped hrefs from the page
"""
elems = driver.find_elements_by_class_name('ResultItem')
return elems
def clean(elems):
"""
This method takes a list of scraped selenium web elements
and filters/returns only the hrefs leading to publications.
Filtering includes removing all urls with keywords that are indicative of
non-html links.
Parameters
----------
elems (list) : The list of hrefs to be filtered
Returns
-------
urls (list) : The new list of hrefs, which should match the list
displayed in the ScienceDirect GUI
"""
titles = []
urls = []
for elem in elems:
href_child = elem.find_element_by_css_selector('a[href]')
url = href_child.get_attribute('href')
title = href_child.text
titles.append(title)
urls.append(url)
return urls, titles
def build_url_list(gui_prefix,search_terms,journal_list):
"""
This method takes the list of journals and creates a triple-nested dictionary
containing all accessible urls to each page, in each year, for each journal,
for a given search on sciencedirect.
"""
dict1 = {}
years = np.arange(1995,2020)
for journal in journal_list:
dict2 = {}
for year in years:
dict3 = {}
for i in range(60):
url = gui_prefix + search_terms + '&show=100'+ '&articleTypes=FLA%2CREV' + '&years='+ str(year)
if i != 0:
url = url + '&offset=' + str(i) +'00'
url = url + '&pub=' + journal
dict3[i] = url
dict2[year] = dict3
dict1[journal] = dict2
return dict1
def proxify(scraped_urls,uw_prefix):
"""
This method takes a list of scraped urls and turns them into urls that
go through the UW Library proxy so that all of them are full access.
Parameters
----------
scraped_urls (list) : The list of URLs to be converted
uw_prefix (str) : The string that all URLs which go through the UW Library
Proxy start with.
Returns
-------
proxy_urls (list) : The list of converted URLs which go through UW Library
proxy
"""
proxy_urls = []
for url in scraped_urls:
sd_id = url[-17:]
newlink = uw_prefix + sd_id
if sd_id.startswith('S'):
proxy_urls.append(newlink)
return proxy_urls
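# e.g. with a typical ScienceDirect URL, the last 17 characters are the PII
# (the value below is illustrative):
# proxify(['https://www.sciencedirect.com/science/article/pii/S0123456789012345'],
#         uw_prefix)
# -> [uw_prefix + 'S0123456789012345']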
def write_urls(urls,titles,file,journal,year):
"""
This method takes a list of urls and writes them to a desired text file.
Parameters
----------
urls (list) : The list of URLs to be saved.
file (file object) : The opened .txt file which will be written to.
year (str or int) : The year associated with the publication date.
Returns
-------
Does not return anything
"""
for link,title in zip(urls,titles):
line = link + ',' + title + ',' + journal + ',' + str(year)
file.write(line)
file.write('\n')
def find_pubTitle(driver,journal):
"""
This method finds the identifying number for a specific journal. This
identifying number is added to the gui query URL to ensure only publications
from the desired journal are being found.
"""
pub_elems = driver.find_elements_by_css_selector('input[id*=publicationTitles]')
pub_names = []
for elem in pub_elems:
pub_name = elem.get_attribute("name")
if pub_name == journal:
return elem.get_attribute('id')[-6:] #returns the identifying number
#for that journal
df = pd.read_excel('elsevier_journals.xls')
df.Full_Category = df.Full_Category.str.lower() # lowercase topics for searching
df = df.drop_duplicates(subset = 'Journal_Title') # drop any duplicate journals
df = shuffle(df,random_state = 42)
# The set of default strings that will be used to sort which journals we want
journal_strings = ['chemistry','energy','molecular','atomic','chemical','biochem'
,'organic','polymer','chemical engineering','biotech','coloid']
name = df.Full_Category.str.contains # making this an easier command to type
# new dataframe full of only journals whose topic description contained the
# desired keywords
df2 = df[name('polymer') | name('chemistry') | name('energy')
| name('molecular') | name('colloid') | name('biochem')
| name('organic') | name('biotech') | name('chemical')]
journal_list = df2.Journal_Title # Series of only the journals to be searched
gui_prefix = 'https://www.sciencedirect.com/search/advanced?qs='
search_terms = 'chemistry%20OR%20molecule%20OR%20polymer%20OR%20organic'
url_dict = build_url_list(gui_prefix,search_terms,journal_list)
driver = webdriver.Chrome()
uw_prefix = 'https://www-sciencedirect-com.offcampus.lib.washington.edu/science/article/pii/'
filename = input("Input filename with .txt extension for URL storage: ")
url_counter = 0
master_list = []
file = open(filename,'a+')
for journal in journal_list:
for year in np.arange(1995,2020):
for offset in np.arange(60):
page = url_dict[journal][year][offset]
print("journal, year, offset = ",journal,year,offset)
driver.get(page)
time.sleep(2) # need sleep to load the page properly
if offset == 0: # if on page 1, we need to grab the publisher number
try: # we may be at a page which won't have the item we are looking for
pubTitles = find_pubTitle(driver, journal)  # journal is the current loop variable
for yr in url_dict[journal]:  # update every url stored for this journal
    for off in url_dict[journal][yr]:
        url_dict[journal][yr][off] += '&pubTitles=' + pubTitles
driver.get(url_dict[journal][year][0]) # reload the first page with the new url
except:
pass # if there is an exception, it means we are on the right page
scraped_elems = scrape_page(driver) # scrape the page
scraped_urls, titles = clean(scraped_elems)
proxy_urls = proxify(scraped_urls,uw_prefix) # not even sure this is needed
write_urls(proxy_urls,titles,file,journal,year)
url_counter += len(proxy_urls)
print('Total URLs saved is: ',url_counter)
if len(scraped_elems) < 100: # after content is saved, go to the next year
break # because we know this is the last page of urls for this year
file.close()
driver.quit()
| 33.730233
| 125
| 0.628792
| 964
| 7,252
| 4.637967
| 0.318465
| 0.01342
| 0.010065
| 0.011631
| 0.129278
| 0.059942
| 0.043391
| 0.030418
| 0.019235
| 0
| 0
| 0.012393
| 0.276751
| 7,252
| 214
| 126
| 33.88785
| 0.840038
| 0.371622
| 0
| 0.019608
| 0
| 0.009804
| 0.14049
| 0.029638
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0.009804
| 0.078431
| 0
| 0.186275
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a7ef598ad33e1712e909b5218a858b7b8de970f
| 1,903
|
py
|
Python
|
superset/typing.py
|
GodelTech/superset
|
da170aa57e94053cf715f7b41b09901c813a149a
|
[
"Apache-2.0"
] | 7
|
2020-07-31T04:50:01.000Z
|
2021-12-08T07:56:42.000Z
|
superset/typing.py
|
GodelTech/superset
|
da170aa57e94053cf715f7b41b09901c813a149a
|
[
"Apache-2.0"
] | 77
|
2020-02-02T07:54:13.000Z
|
2022-03-23T18:22:04.000Z
|
superset/typing.py
|
GodelTech/superset
|
da170aa57e94053cf715f7b41b09901c813a149a
|
[
"Apache-2.0"
] | 6
|
2020-03-25T01:02:29.000Z
|
2021-05-12T17:11:19.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from flask import Flask
from flask_caching import Cache
from werkzeug.wrappers import Response
CacheConfig = Union[Callable[[Flask], Cache], Dict[str, Any]]
DbapiDescriptionRow = Tuple[
str, str, Optional[str], Optional[str], Optional[int], Optional[int], bool
]
DbapiDescription = Union[List[DbapiDescriptionRow], Tuple[DbapiDescriptionRow, ...]]
DbapiResult = Sequence[Union[List[Any], Tuple[Any, ...]]]
FilterValue = Union[datetime, float, int, str]
FilterValues = Union[FilterValue, List[FilterValue], Tuple[FilterValue]]
FormData = Dict[str, Any]
Granularity = Union[str, Dict[str, Union[str, float]]]
AdhocMetric = Dict[str, Any]
Metric = Union[AdhocMetric, str]
OrderBy = Tuple[Metric, bool]
QueryObjectDict = Dict[str, Any]
VizData = Optional[Union[List[Any], Dict[Any, Any]]]
VizPayload = Dict[str, Any]
# Flask response.
Base = Union[bytes, str]
Status = Union[int, str]
Headers = Dict[str, Any]
FlaskResponse = Union[
Response, Base, Tuple[Base, Status], Tuple[Base, Status, Headers],
]
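# A minimal sketch of how these aliases are typically consumed in annotations
# (the view function below is hypothetical, not part of Superset):
#
# def healthcheck() -> FlaskResponse:
#     return "OK", 200  # a Tuple[Base, Status]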
| 39.645833
| 84
| 0.755123
| 261
| 1,903
| 5.501916
| 0.429119
| 0.034123
| 0.041783
| 0.022284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002451
| 0.142407
| 1,903
| 47
| 85
| 40.489362
| 0.877451
| 0.403573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.185185
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a7f17dbb71fa4b55ccb4daea833fc07286f055d
| 2,042
|
py
|
Python
|
log_system_information.py
|
ibaiGorordo/depthai
|
57b437f38ebe80e870ee4852ca7ccc80eaaa76cc
|
[
"MIT"
] | 476
|
2020-04-21T11:38:55.000Z
|
2022-03-29T02:59:34.000Z
|
log_system_information.py
|
ibaiGorordo/depthai
|
57b437f38ebe80e870ee4852ca7ccc80eaaa76cc
|
[
"MIT"
] | 440
|
2020-04-15T19:15:01.000Z
|
2022-03-31T21:17:33.000Z
|
log_system_information.py
|
ibaiGorordo/depthai
|
57b437f38ebe80e870ee4852ca7ccc80eaaa76cc
|
[
"MIT"
] | 124
|
2020-04-23T19:23:25.000Z
|
2022-03-30T19:12:36.000Z
|
#!/usr/bin/env python3
import json
import platform
def make_sys_report(anonymous=False, skipUsb=False, skipPackages=False):
def get_usb():
try:
import usb.core
except ImportError:
yield "NoLib"
return
speeds = ["Unknown", "Low", "Full", "High", "Super", "SuperPlus"]
format_hex = lambda val: f"{val:#0{6}x}"
try:
for dev in usb.core.find(find_all=True):
yield {
"port": dev.port_number,
"vendor_id": format_hex(dev.idVendor),
"product_id": format_hex(dev.idProduct),
"speed": speeds[dev.speed] if dev.speed < len(speeds) else dev.speed
}
except usb.core.NoBackendError:
yield "No USB backend found"
result = {
"architecture": ' '.join(platform.architecture()).strip(),
"machine": platform.machine(),
"platform": platform.platform(),
"processor": platform.processor(),
"python_build": ' '.join(platform.python_build()).strip(),
"python_compiler": platform.python_compiler(),
"python_implementation": platform.python_implementation(),
"python_version": platform.python_version(),
"release": platform.release(),
"system": platform.system(),
"version": platform.version(),
"win32_ver": ' '.join(platform.win32_ver()).strip(),
}
if not skipPackages:
from pip._internal.operations.freeze import freeze
result["packages"] = list(freeze(local_only=True))
if not skipUsb:
result["usb"] = list(get_usb())
if not anonymous:
result["uname"] = ' '.join(platform.uname()).strip(),
return result
if __name__ == "__main__":
data = make_sys_report()
with open("log_system_information.json", "w") as f:
json.dump(data, f, indent=4)
print(json.dumps(data, indent=4))
print("System info gathered successfully - saved as \"log_system_information.json\"")
| 34.610169
| 89
| 0.589128
| 222
| 2,042
| 5.252252
| 0.468468
| 0.041166
| 0.022298
| 0.024014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006036
| 0.269834
| 2,042
| 58
| 90
| 35.206897
| 0.775989
| 0.010284
| 0
| 0.040816
| 0
| 0
| 0.161386
| 0.023762
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.102041
| 0
| 0.183673
| 0.040816
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a7fa4ecf8696f5df75632b66c0092d937a89bf0
| 2,346
|
py
|
Python
|
enigma.py
|
fewieden/Enigma-Machine
|
0c130d3cf1bb5146d438cc39dca55ebbcb0f1cdf
|
[
"MIT"
] | 1
|
2018-10-29T10:46:10.000Z
|
2018-10-29T10:46:10.000Z
|
enigma.py
|
fewieden/Enigma-Machine
|
0c130d3cf1bb5146d438cc39dca55ebbcb0f1cdf
|
[
"MIT"
] | null | null | null |
enigma.py
|
fewieden/Enigma-Machine
|
0c130d3cf1bb5146d438cc39dca55ebbcb0f1cdf
|
[
"MIT"
] | 1
|
2021-09-05T16:18:25.000Z
|
2021-09-05T16:18:25.000Z
|
from rotor import Rotor
import sys
import getopt
class Enigma:
def __init__(self, key, rotors):
self.key = list(key)
self.rotors = []
for i in range(0, len(rotors)):
self.rotors.append(Rotor(self.key[i], rotors[i]))
def encrypt(self, word):
cipher = ''
for i, char in enumerate(word.upper()):
distance = self.rotors[i % 2].get_distance(char)
cipher += self.rotors[2].rotate((i + 1) % 2, distance)
return cipher
def decrypt(self, cipher):
word = ''
for i, char in enumerate(cipher.upper()):
distance = self.rotors[2].get_distance(char)
word += self.rotors[i % 2].rotate((i + 1) % 2, distance)
return word
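# A minimal usage sketch (the key and rotor numbers are illustrative; per
# print_help below, valid rotors are 50, 51, 60, 61, 70 and 71):
# machine = Enigma('ABC', ['50', '61', '70'])
# cipher = machine.encrypt('HELLO')
# plain = machine.decrypt(cipher)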
def print_help():
print("\ncommand line arguments:\n" +
"-h/--help: all possible options\n" +
"-k/--key KEY: rotor starting key\n" +
"-p/--phrase Phrase: phrase to encrypt/decrypt\n" +
"-d/--decrypt: enables decrypt default is encrypt\n" +
"--r1 ROTOR: sets rotor 1\n" +
"--r2 ROTOR: sets rotor 2\n" +
"--r3 ROTOR: sets rotor 3\n" +
"possible rotors are 50, 51, 60, 61, 70 and 71\n")
def main(argv):
try:
opts, args = getopt.getopt(argv, "hk:p:d", ["help", "key=", "phrase=", "decrypt", "r1=", "r2=", "r3="])  # "phrase=" takes an argument, matching -p
except getopt.GetoptError:
print_help()
sys.exit(2)
key = ''
phrase = ''
encrypt = True
rotors = ['', '', '']
for opt, arg in opts:
if opt in ("-h", "--help"):
print_help()
sys.exit()
elif opt in ("-k", "--key"):
key = arg
elif opt in ("-p", "--phrase"):
phrase = arg
elif opt in ("-d", "--decrypt"):
encrypt = False
elif opt == "--r1":
rotors[0] = arg
elif opt == "--r2":
rotors[1] = arg
elif opt == "--r3":
rotors[2] = arg
if not key == '' and not phrase == '' and not rotors[0] == ''\
and not rotors[1] == '' and not rotors[2] == '':
machine = Enigma(key, rotors)
if encrypt:
print(machine.encrypt(phrase))
else:
print(machine.decrypt(phrase))
else:
print_help()
if __name__ == '__main__':
main(sys.argv[1:])
| 29.696203
| 110
| 0.498721
| 293
| 2,346
| 3.931741
| 0.286689
| 0.052083
| 0.034722
| 0.017361
| 0.074653
| 0.041667
| 0.041667
| 0
| 0
| 0
| 0
| 0.02635
| 0.336743
| 2,346
| 78
| 111
| 30.076923
| 0.71401
| 0
| 0
| 0.073529
| 0
| 0
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073529
| false
| 0
| 0.044118
| 0
| 0.161765
| 0.102941
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a8049edeef1e3bee26e482ae16b802069251b6f
| 6,780
|
py
|
Python
|
andersoncd/group.py
|
idc9/andersoncd
|
af2123b241e5f82f7c51b2bbf5196fb02723b582
|
[
"BSD-3-Clause"
] | null | null | null |
andersoncd/group.py
|
idc9/andersoncd
|
af2123b241e5f82f7c51b2bbf5196fb02723b582
|
[
"BSD-3-Clause"
] | null | null | null |
andersoncd/group.py
|
idc9/andersoncd
|
af2123b241e5f82f7c51b2bbf5196fb02723b582
|
[
"BSD-3-Clause"
] | null | null | null |
import time
import numpy as np
from scipy import sparse
from numba import njit
from numpy.linalg import norm
from scipy.sparse.linalg import svds
from andersoncd.lasso import dual_lasso
def primal_grp(R, w, alpha, grp_size):
return (0.5 * norm(R) ** 2 + alpha *
norm(w.reshape(-1, grp_size), axis=1).sum())
@njit
def BST(x, u):
"""Block soft-thresholding of vector x at level u."""
norm_x = norm(x)
if norm_x < u:
return np.zeros_like(x)
else:
return (1 - u / norm_x) * x
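# e.g. BST(np.array([3., 4.]), 1.) scales by (1 - 1/5) and returns
# [2.4, 3.2]; BST(np.array([0.3, 0.4]), 1.) returns zeros since ||x|| = 0.5 < u.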
def BST_vec(x, u, grp_size):
norm_grp = norm(x.reshape(-1, grp_size), axis=1)
scaling = np.maximum(1 - u / norm_grp, 0)
return (x.reshape(-1, grp_size) * scaling[:, None]).reshape(x.shape[0])
@njit
def _bcd(X, w, R, alpha, lc, groups):
grp_size = w.shape[0] // lc.shape[0]
for g in groups:
grp = slice(g * grp_size, (g + 1) * grp_size)
Xg = X[:, grp]
old_w_g = w[grp].copy()
w[grp] = BST(old_w_g + Xg.T @ R / lc[g], alpha / lc[g])
if norm(w[grp] - old_w_g) != 0:
R += np.sum((old_w_g - w[grp]) * Xg, axis=1)
@njit
def _bcd_sparse(
X_data, X_indices, X_indptr, w, R, alpha, lc):
grp_size = w.shape[0] // lc.shape[0]
grad = np.zeros(grp_size)
for g in range(lc.shape[0]):
grad.fill(0)
grp = slice(g * grp_size, (g + 1) * grp_size)
for j in range(grp_size * g, grp_size * (g + 1)):
for ix in range(X_indptr[j], X_indptr[j + 1]):
grad[j % grp_size] += X_data[ix] * R[X_indices[ix]]  # j % g would divide by zero for g == 0
old_w_g = w[grp].copy()
w[grp] = BST(old_w_g + grad / lc[g], alpha / lc[g])
if norm(w[grp] - old_w_g) != 0:
for j in range(g * grp_size, (g + 1) * grp_size):
for ix in range(X_indptr[j], X_indptr[j + 1]):
R[X_indices[ix]] += (old_w_g[j % grp_size] -
                     w[j]) * X_data[ix]  # w[j], not w[j % grp_size], holds the updated coefficient
def solver_group(
X, y, alpha, grp_size, max_iter=10000, tol=1e-4, f_gap=10, K=5,
use_acc=False, algo='bcd', compute_time=False, tmax=np.infty,
verbose=True):
"""Solve the GroupLasso with BCD/ISTA/FISTA, eventually with extrapolation.
Groups are contiguous, of size grp_size.
Objective:
norm(y - Xw, ord=2)**2 / 2 + alpha * sum_g ||w_{[g]}||_2
TODO: fill in the docstring
Parameters:
algo: string
'bcd', 'bcdshuf', 'rbcd', 'pgd', 'fista'
compute_time : bool, default=False
Whether to record elapsed times during the solve
tmax : float, default=np.infty
Maximum time (in seconds) the algorithm is allowed to run
alpha: strength of the group penalty
"""
is_sparse = sparse.issparse(X)
n_features = X.shape[1]
if n_features % grp_size != 0:
raise ValueError("n_features is not a multiple of group size")
n_groups = n_features // grp_size
_range = np.arange(n_groups)
groups = dict(
bcd=lambda: _range,
bcdshuf=lambda: np.random.choice(n_groups, n_groups, replace=False),
rbcd=lambda: np.random.choice(n_groups, n_groups, replace=True))
if not is_sparse and not np.isfortran(X):
X = np.asfortranarray(X)
last_K_w = np.zeros([K + 1, n_features])
U = np.zeros([K, n_features])
if algo in ('pgd', 'fista'):
if is_sparse:
L = svds(X, k=1)[1][0] ** 2
else:
L = norm(X, ord=2) ** 2
lc = np.zeros(n_groups)
for g in range(n_groups):
X_g = X[:, g * grp_size: (g + 1) * grp_size]
if is_sparse:
gram = (X_g.T @ X_g).todense()
lc[g] = norm(gram, ord=2)
else:
lc[g] = norm(X_g, ord=2) ** 2
w = np.zeros(n_features)
if algo == 'fista':
z = np.zeros(n_features)
t_new = 1
R = y.copy()
E = []
gaps = np.zeros(max_iter // f_gap)
if compute_time:
times = []
t_start = time.time()
for it in range(max_iter):
if it % f_gap == 0:
if algo == 'fista':
R = y - X @ w
p_obj = primal_grp(R, w, alpha, grp_size)
E.append(p_obj)
theta = R / alpha
if compute_time:
elapsed_times = time.time() - t_start
times.append(elapsed_times)
if verbose:
print("elapsed time: %f " % elapsed_times)
if elapsed_times > tmax:
break
d_norm_theta = np.max(
norm((X.T @ theta).reshape(-1, grp_size), axis=1))
if d_norm_theta > 1.:
theta /= d_norm_theta
d_obj = dual_lasso(y, theta, alpha)
gap = p_obj - d_obj
if verbose:
print("Iteration %d, p_obj::%.5f, d_obj::%.5f, gap::%.2e" %
(it, p_obj, d_obj, gap))
gaps[it // f_gap] = gap
if gap < tol:
print("Early exit")
break
if algo.endswith('bcd'):
if is_sparse:
_bcd_sparse(
X.data, X.indices, X.indptr, w, R, alpha, lc)
else:
_bcd(X, w, R, alpha, lc, groups[algo]())
elif algo == 'pgd':
w[:] = BST_vec(w + X.T @ R / L, alpha / L, grp_size)
R[:] = y - X @ w
elif algo == 'fista':
w_old = w.copy()
w[:] = BST_vec(z - X.T @ (X @ z - y) / L, alpha / L, grp_size)
t_old = t_new
t_new = (1. + np.sqrt(1 + 4 * t_old ** 2)) / 2.
z[:] = w + (t_old - 1.) / t_new * (w - w_old)
else:
raise ValueError("Unknown algo %s" % algo)
if use_acc:
if it < K + 1:
last_K_w[it] = w
else:
for k in range(K):
last_K_w[k] = last_K_w[k + 1]
last_K_w[K - 1] = w
for k in range(K):
U[k] = last_K_w[k + 1] - last_K_w[k]
C = np.dot(U, U.T)
try:
z = np.linalg.solve(C, np.ones(K))
c = z / z.sum()
w_acc = np.sum(last_K_w[:-1] * c[:, None],
axis=0)
p_obj = primal_grp(R, w, alpha, grp_size)
R_acc = y - X @ w_acc
p_obj_acc = primal_grp(R_acc, w_acc, alpha, grp_size)
if p_obj_acc < p_obj:
w = w_acc
R = R_acc
except np.linalg.LinAlgError:
if verbose:
print("----------Linalg error")
if compute_time:
return w, np.array(E), gaps[:it // f_gap + 1], times
return w, np.array(E), gaps[:it // f_gap + 1]
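# A minimal smoke-test sketch on random data (problem sizes and alpha are
# illustrative):
if __name__ == "__main__":
    np.random.seed(0)
    X = np.random.randn(50, 20)
    y = np.random.randn(50)
    w, E, gaps = solver_group(X, y, alpha=0.5, grp_size=5, max_iter=200,
                              verbose=False)
    print("nonzero groups:", int(np.sum(norm(w.reshape(-1, 5), axis=1) != 0)))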
| 31.100917
| 79
| 0.488791
| 1,022
| 6,780
| 3.062622
| 0.178082
| 0.067093
| 0.020447
| 0.014377
| 0.268371
| 0.235463
| 0.216294
| 0.182748
| 0.1623
| 0.102875
| 0
| 0.018881
| 0.375074
| 6,780
| 217
| 80
| 31.24424
| 0.719849
| 0.075516
| 0
| 0.223602
| 0
| 0
| 0.030142
| 0
| 0
| 0
| 0
| 0.004608
| 0
| 1
| 0.037267
| false
| 0
| 0.043478
| 0.006211
| 0.118012
| 0.024845
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a804f8203f3e605d7c6651f77fee25137c52bc6
| 6,259
|
py
|
Python
|
textattack/search_methods/greedy_word_swap_wir.py
|
dheerajrav/TextAttack
|
41e747215bb0f01c511af95b16b94704c780cd5a
|
[
"MIT"
] | null | null | null |
textattack/search_methods/greedy_word_swap_wir.py
|
dheerajrav/TextAttack
|
41e747215bb0f01c511af95b16b94704c780cd5a
|
[
"MIT"
] | null | null | null |
textattack/search_methods/greedy_word_swap_wir.py
|
dheerajrav/TextAttack
|
41e747215bb0f01c511af95b16b94704c780cd5a
|
[
"MIT"
] | null | null | null |
"""
Greedy Word Swap with Word Importance Ranking
===================================================
When WIR method is set to ``unk``, this is a reimplementation of the search
method from the paper: Is BERT Really Robust?
A Strong Baseline for Natural Language Attack on Text Classification and
Entailment by Jin et al., 2019. See https://arxiv.org/abs/1907.11932 and
https://github.com/jind11/TextFooler.
"""
import numpy as np
import torch
from torch.nn.functional import softmax
from textattack.goal_function_results import GoalFunctionResultStatus
from textattack.search_methods import SearchMethod
from textattack.shared.validators import (
transformation_consists_of_word_swaps_and_deletions,
)
class GreedyWordSwapWIR(SearchMethod):
"""An attack that greedily chooses from a list of possible perturbations in
order of index, after ranking indices by importance.
Args:
wir_method: method for ranking most important words
"""
def __init__(self, wir_method="unk"):
self.wir_method = wir_method
def _get_index_order(self, initial_text):
"""Returns word indices of ``initial_text`` in descending order of
importance."""
len_text = len(initial_text.words)
if self.wir_method == "unk":
leave_one_texts = [
initial_text.replace_word_at_index(i, "[UNK]") for i in range(len_text)
]
leave_one_results, search_over = self.get_goal_results(leave_one_texts)
index_scores = np.array([result.score for result in leave_one_results])
elif self.wir_method == "weighted-saliency":
# first, compute word saliency
leave_one_texts = [
initial_text.replace_word_at_index(i, "[UNK]") for i in range(len_text)
]
leave_one_results, search_over = self.get_goal_results(leave_one_texts)
saliency_scores = np.array([result.score for result in leave_one_results])
softmax_saliency_scores = softmax(
torch.Tensor(saliency_scores), dim=0
).numpy()
# compute the largest change in score we can find by swapping each word
delta_ps = []
for idx in range(len_text):
transformed_text_candidates = self.get_transformations(
initial_text,
original_text=initial_text,
indices_to_modify=[idx],
)
if not transformed_text_candidates:
# no valid synonym substitutions for this word
delta_ps.append(0.0)
continue
swap_results, _ = self.get_goal_results(transformed_text_candidates)
score_change = [result.score for result in swap_results]
max_score_change = np.max(score_change)
delta_ps.append(max_score_change)
index_scores = softmax_saliency_scores * np.array(delta_ps)
elif self.wir_method == "delete":
leave_one_texts = [
initial_text.delete_word_at_index(i) for i in range(len_text)
]
leave_one_results, search_over = self.get_goal_results(leave_one_texts)
index_scores = np.array([result.score for result in leave_one_results])
elif self.wir_method == "random":
index_order = np.arange(len_text)
np.random.shuffle(index_order)
search_over = False
else:
raise ValueError(f"Unsupported WIR method {self.wir_method}")
if self.wir_method != "random":
index_order = (-index_scores).argsort()
return index_order, search_over
def _perform_search(self, initial_result):
attacked_text = initial_result.attacked_text
# Sort words by order of importance
index_order, search_over = self._get_index_order(attacked_text)
i = 0
cur_result = initial_result
results = None
while i < len(index_order) and not search_over:
transformed_text_candidates = self.get_transformations(
cur_result.attacked_text,
original_text=initial_result.attacked_text,
indices_to_modify=[index_order[i]],
)
i += 1
if len(transformed_text_candidates) == 0:
continue
results, search_over = self.get_goal_results(transformed_text_candidates)
results = sorted(results, key=lambda x: -x.score)
# Skip swaps which don't improve the score
if results[0].score > cur_result.score:
cur_result = results[0]
else:
continue
# If we succeeded, return the index with best similarity.
if cur_result.goal_status == GoalFunctionResultStatus.SUCCEEDED:
best_result = cur_result
# @TODO: Use vectorwise operations
max_similarity = -float("inf")
for result in results:
if result.goal_status != GoalFunctionResultStatus.SUCCEEDED:
break
candidate = result.attacked_text
try:
similarity_score = candidate.attack_attrs["similarity_score"]
except KeyError:
# If the attack was run without any similarity metrics,
# candidates won't have a similarity score. In this
# case, break and return the candidate that changed
# the original score the most.
break
if similarity_score > max_similarity:
max_similarity = similarity_score
best_result = result
return best_result
return cur_result
def check_transformation_compatibility(self, transformation):
"""Since it ranks words by their importance, GreedyWordSwapWIR is
limited to word swap and deletion transformations."""
return transformation_consists_of_word_swaps_and_deletions(transformation)
def extra_repr_keys(self):
return ["wir_method"]
| 41.450331
| 87
| 0.615594
| 712
| 6,259
| 5.148876
| 0.289326
| 0.031915
| 0.028369
| 0.023186
| 0.284234
| 0.229133
| 0.191217
| 0.138571
| 0.138571
| 0.138571
| 0
| 0.005346
| 0.31267
| 6,259
| 150
| 88
| 41.726667
| 0.846815
| 0.204026
| 0
| 0.191919
| 0
| 0
| 0.02441
| 0
| 0
| 0
| 0
| 0.006667
| 0
| 1
| 0.050505
| false
| 0
| 0.060606
| 0.010101
| 0.171717
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a81693f8777fdc22a6d886900c28851626fa805
| 377
|
py
|
Python
|
lemur/deployment/service.py
|
rajatsharma94/lemur
|
99f46c1addcd40154835e151d0b189e1578805bb
|
[
"Apache-2.0"
] | 1,656
|
2015-09-20T03:12:28.000Z
|
2022-03-29T18:00:54.000Z
|
lemur/deployment/service.py
|
rajatsharma94/lemur
|
99f46c1addcd40154835e151d0b189e1578805bb
|
[
"Apache-2.0"
] | 3,017
|
2015-09-18T23:15:24.000Z
|
2022-03-30T22:40:02.000Z
|
lemur/deployment/service.py
|
rajatsharma94/lemur
|
99f46c1addcd40154835e151d0b189e1578805bb
|
[
"Apache-2.0"
] | 401
|
2015-09-18T23:02:18.000Z
|
2022-02-20T16:13:14.000Z
|
from lemur import database
def rotate_certificate(endpoint, new_cert):
"""
Rotates a certificate on a given endpoint.
:param endpoint: endpoint whose certificate should be rotated
:param new_cert: certificate to deploy on the endpoint
:return: None
"""
# ensure that certificate is available for rotation
endpoint.source.plugin.update_endpoint(endpoint, new_cert)
endpoint.certificate = new_cert
database.update(endpoint)
| 23.5625
| 62
| 0.71618
| 45
| 377
| 5.866667
| 0.555556
| 0.106061
| 0.113636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206897
| 377
| 15
| 63
| 25.133333
| 0.882943
| 0.363395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a82635dab6776cd0ffd24a37efd7bf2386d6303
| 1,827
|
py
|
Python
|
train/general_train_example/1_parse.py
|
ss433s/sosweety
|
4cb1a0f061f26e509ee51c0fabd0284ad15804a5
|
[
"MIT"
] | null | null | null |
train/general_train_example/1_parse.py
|
ss433s/sosweety
|
4cb1a0f061f26e509ee51c0fabd0284ad15804a5
|
[
"MIT"
] | null | null | null |
train/general_train_example/1_parse.py
|
ss433s/sosweety
|
4cb1a0f061f26e509ee51c0fabd0284ad15804a5
|
[
"MIT"
] | null | null | null |
import os, sys
import json
# Get the current path, then locate the project root via the anchor file
this_file_path = os.path.split(os.path.realpath(__file__))[0]
this_path = this_file_path
root_path = this_file_path
while this_path:
if os.path.exists(os.path.join(this_path, 'sosweety_root_anchor.py')):
root_path = this_path
break
par_path = os.path.dirname(this_path)
# print(par_path)
if par_path == this_path:
break
else:
this_path = par_path
sys.path.append(root_path)
from modules.sParser.sParser import sParser
from modules.knowledgebase.kb import KnowledgeBase
train_dir = 'data/train_zh_wiki'
train_dir = os.path.join(root_path, train_dir)
if not os.path.exists(train_dir):
os.makedirs(train_dir)
# Write out the parse result file
parse_result_dir = 'parse_result'
parse_result_dir = os.path.join(train_dir, parse_result_dir)
if not os.path.exists(parse_result_dir):
os.makedirs(parse_result_dir)
pos_tags_file_name = 'pos_tags_file'
pos_tags_file_path = os.path.join(parse_result_dir, pos_tags_file_name)
KB = KnowledgeBase()
parser = sParser(KB)
with open(pos_tags_file_path, 'w') as pos_tags_file:
# Open the corpus file
file_path = 'data/corpus/zh_wiki/wiki_test'
file_path = os.path.join(root_path, file_path)
file = open(file_path)
line = file.readline()
count = 0
while line:
count += 1
if count % 5000 == 0:
print('parsed %s sentences' % count)
text = line.strip()
try:
ss_pos_tags = parser.text2ss_pos_tags(text)
for pos_tags in ss_pos_tags:
pos_tags_file.write(json.dumps(pos_tags, ensure_ascii=False) + '\n')
except Exception:
print('line %s decode error' % count)
line = file.readline()
file.close()
| 29.467742
| 85
| 0.667761
| 266
| 1,827
| 4.278195
| 0.293233
| 0.073814
| 0.067663
| 0.036907
| 0.140598
| 0.086116
| 0.050967
| 0
| 0
| 0
| 0
| 0.006433
| 0.234264
| 1,827
| 61
| 86
| 29.95082
| 0.807005
| 0.039409
| 0
| 0.083333
| 0
| 0
| 0.080521
| 0.030787
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a83eff7c2dc361748d280106f49c290cfe4b19f
| 6,474
|
py
|
Python
|
src/phrase_manager/phrase_manager.py
|
Franco7Scala/GeneratingNaturalLanguageAdversarialExamplesThroughParticleFiltering
|
095b47eb76503d44f54f701d303193328a5a4c86
|
[
"MIT"
] | null | null | null |
src/phrase_manager/phrase_manager.py
|
Franco7Scala/GeneratingNaturalLanguageAdversarialExamplesThroughParticleFiltering
|
095b47eb76503d44f54f701d303193328a5a4c86
|
[
"MIT"
] | 6
|
2020-01-28T23:09:44.000Z
|
2022-02-10T01:16:59.000Z
|
src/phrase_manager/phrase_manager.py
|
Franco7Scala/GeneratingNaturalLanguageAdversarialExamplesThroughParticleFiltering
|
095b47eb76503d44f54f701d303193328a5a4c86
|
[
"MIT"
] | null | null | null |
import numpy
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from src.support import support
class PhraseManager:
def __init__(self, configuration):
self.train_phrases, self.train_labels = self._read_train_phrases()
self.test_phrases, self.test_labels = self._read_test_phrases()
self.configuration = configuration
self.tokenizer = None
def get_phrases_train(self):
return self.train_phrases, self.train_labels
def get_phrases_test(self):
return self.test_phrases, self.test_labels
def get_dataset(self, level=None):
if level == support.WORD_LEVEL:
return self._word_process(self.configuration[support.WORD_MAX_LENGTH])
elif level == support.CHAR_LEVEL:
return self._char_process(self.configuration[support.CHAR_MAX_LENGTH])
else:
return self.train_phrases, self.train_labels, self.test_phrases, self.test_labels
def _word_process(self, word_max_length):
tokenizer = Tokenizer(num_words=self.configuration[support.QUANTITY_WORDS])
tokenizer.fit_on_texts(self.train_phrases)
x_train_sequence = tokenizer.texts_to_sequences(self.train_phrases)
x_test_sequence = tokenizer.texts_to_sequences(self.test_phrases)
x_train = sequence.pad_sequences(x_train_sequence, maxlen=word_max_length, padding='post', truncating='post')
x_test = sequence.pad_sequences(x_test_sequence, maxlen=word_max_length, padding='post', truncating='post')
y_train = numpy.array(self.train_labels)
y_test = numpy.array(self.test_labels)
return x_train, y_train, x_test, y_test
def _char_process(self, max_length):
embedding_w, embedding_dic = self._onehot_dic_build()
x_train = []
for i in range(len(self.train_phrases)):
doc_vec = self._doc_process(self.train_phrases[i].lower(), embedding_dic, max_length)
x_train.append(doc_vec)
x_train = numpy.asarray(x_train, dtype='int64')
y_train = numpy.array(self.train_labels, dtype='float32')
x_test = []
for i in range(len(self.test_phrases)):
doc_vec = self._doc_process(self.test_phrases[i].lower(), embedding_dic, max_length)
x_test.append(doc_vec)
x_test = numpy.asarray(x_test, dtype='int64')
y_test = numpy.array(self.test_labels, dtype='float32')
del embedding_w, embedding_dic
return x_train, y_train, x_test, y_test
def _doc_process(self, doc, embedding_dic, max_length):
min_length = min(max_length, len(doc))
doc_vec = numpy.zeros(max_length, dtype='int64')
for j in range(min_length):
if doc[j] in embedding_dic:
doc_vec[j] = embedding_dic[doc[j]]
else:
doc_vec[j] = embedding_dic['UNK']
return doc_vec
def _onehot_dic_build(self):
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}"
embedding_dic = {}
embedding_w = []
embedding_dic["UNK"] = 0
embedding_w.append(numpy.zeros(len(alphabet), dtype='float32'))
for i, alpha in enumerate(alphabet):
onehot = numpy.zeros(len(alphabet), dtype='float32')
embedding_dic[alpha] = i + 1
onehot[i] = 1
embedding_w.append(onehot)
embedding_w = numpy.array(embedding_w, dtype='float32')
return embedding_w, embedding_dic
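# e.g. embedding_dic['a'] == 1 and embedding_w[1] is the one-hot row for 'a';
# index 0 is reserved for the all-zeros 'UNK' row.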
def get_tokenizer(self):
if self.tokenizer is None:
self.tokenizer = Tokenizer(num_words=self.configuration[support.QUANTITY_WORDS])
self.tokenizer.fit_on_texts(self.train_phrases)
return self.tokenizer
def text_to_vector_word(self, text):
vector_sequence = self.get_tokenizer().texts_to_sequences([text])
result = sequence.pad_sequences(vector_sequence, maxlen=self.configuration[support.WORD_MAX_LENGTH], padding='post', truncating='post')
return result
def text_to_vector_word_all(self, texts):
vector_sequence = self.get_tokenizer().texts_to_sequences(texts)
result = sequence.pad_sequences(vector_sequence, maxlen=self.configuration[support.WORD_MAX_LENGTH], padding='post', truncating='post')
return result
def text_to_vector_char(self, text):
embedding_dictionary = self._get_embedding_dictionary()
max_length = self.configuration[support.CHAR_MAX_LENGTH]
min_length = min(max_length, len(text))
text_vector = numpy.zeros(max_length, dtype="int64")
for j in range(min_length):
if text[j] in embedding_dictionary:
text_vector[j] = embedding_dictionary[text[j]]
else:
text_vector[j] = embedding_dictionary["UNK"]
return text_vector
def text_to_vector_char_all(self, texts):
embedding_w, embedding_dic = self._onehot_dic_build()
result = []
for i in range(len(texts)):
doc_vec = self.text_to_vector_char(texts[i].lower())
result.append(doc_vec)
result = numpy.asarray(result, dtype="int64")
del embedding_w, embedding_dic
return result
def _get_embedding_dictionary(self):
return {'UNK': 0, 'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9, 'j': 10,
'k': 11, 'l': 12,
'm': 13, 'n': 14, 'o': 15, 'p': 16, 'q': 17, 'r': 18, 's': 19, 't': 20, 'u': 21, 'v': 22,
'w': 23, 'x': 24,
'y': 25, 'z': 26, '0': 27, '1': 28, '2': 29, '3': 30, '4': 31, '5': 32, '6': 33, '7': 34,
'8': 35, '9': 36,
'-': 60, ',': 38, ';': 39, '.': 40, '!': 41, '?': 42, ':': 43, "'": 44, '"': 45, '/': 46,
'\\': 47, '|': 48,
'_': 49, '@': 50, '#': 51, '$': 52, '%': 53, '^': 54, '&': 55, '*': 56, '~': 57, '`': 58,
'+': 59, '=': 61,
'<': 62, '>': 63, '(': 64, ')': 65, '[': 66, ']': 67, '{': 68, '}': 69}
def get_classes(self):
pass
def _read_train_phrases(self):
pass
def _read_test_phrases(self):
pass
class Phrase:
def __init__(self, text, classification):
self.text = text
self.classification = classification
def __str__(self):
return "Classification: " + str(self.classification) + "\nText: " + self.text
| 40.21118
| 143
| 0.61384
| 826
| 6,474
| 4.530266
| 0.215496
| 0.040887
| 0.034206
| 0.035275
| 0.51256
| 0.470337
| 0.37333
| 0.259754
| 0.177445
| 0.115981
| 0
| 0.035178
| 0.24915
| 6,474
| 160
| 144
| 40.4625
| 0.734623
| 0
| 0
| 0.150794
| 0
| 0.063492
| 0.03738
| 0.006951
| 0
| 0
| 0
| 0
| 0
| 1
| 0.150794
| false
| 0.02381
| 0.031746
| 0.031746
| 0.325397
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a862e609f431ba255f2003bb9d5372890839f22
| 1,680
|
py
|
Python
|
Networks/Threading/server.py
|
polbebe/PinkPanther
|
c6ba47956b2cae6468ac0cfe56229b5434fec754
|
[
"MIT"
] | null | null | null |
Networks/Threading/server.py
|
polbebe/PinkPanther
|
c6ba47956b2cae6468ac0cfe56229b5434fec754
|
[
"MIT"
] | null | null | null |
Networks/Threading/server.py
|
polbebe/PinkPanther
|
c6ba47956b2cae6468ac0cfe56229b5434fec754
|
[
"MIT"
] | null | null | null |
import gym
import gym.spaces as spaces
import sys
import socket
from _thread import *
import os
import numpy as np
import pandas as pd
import math as m
import time
import random
class NetEnv(gym.Env):
def __init__(self):
# Robot state values that will be exchanged with the client
self.robot_state = None
self.pos = None
self.message = np.array(12345, dtype=np.float32)
# Socket connection
# On macOS, find the WiFi IP with: ipconfig getifaddr en0
HOST = '192.168.1.29'
# Port to listen on (non-privileged ports are > 1023)
PORT = 65432
self.ThreadCount = 0
print('Connected')
# Set up Socket
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.s.bind((HOST, PORT))
except socket.error as e:
print(str(e))
print('Waiting for connection[s]...')
self.s.listen()
self.start = 0
# Wait for client[s] to join socket
self.conn1, addr1 = self.s.accept()
print('Connected by: ', addr1)
start_new_thread(self.main_client_thread, (self.conn1, ))
self.conn2, addr2 = self.s.accept()
print('Connected by: ', addr2)
start_new_thread(self.cam_client_thread, (self.conn2, ))
def main_client_thread(self, conn):
data = conn.recv(1024)
print('Main client says: {}'.format(data.decode('utf-8')))
conn.sendall(str.encode('Hi'))
def cam_client_thread(self, conn):
data = conn.recv(1024)
print('Cam client says: {}'.format(data.decode('utf-8')))
conn.sendall(str.encode('Hi'))
def step(self):
self.main_client_thread(self.conn1)
self.cam_client_thread(self.conn2)
if __name__ == '__main__':
# Construct MAIN SERVER object
env = NetEnv()
# WALK
for i in range(100000):
env.step()
print('Done')
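A minimal client sketch for the server above, assuming the same HOST/PORT values; the server reads one message from each connection and answers with 'Hi'.
import socket

HOST = '192.168.1.29'  # must match the address the server binds to
PORT = 65432

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    s.sendall(str.encode('hello from client'))  # printed by main_client_thread / cam_client_thread
    print(s.recv(1024).decode('utf-8'))         # 'Hi'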
| 20
| 60
| 0.691667
| 258
| 1,680
| 4.379845
| 0.457364
| 0.070796
| 0.084956
| 0.053097
| 0.322124
| 0.322124
| 0.228319
| 0.169912
| 0.169912
| 0.097345
| 0
| 0.039046
| 0.176786
| 1,680
| 83
| 61
| 20.240964
| 0.778019
| 0.145833
| 0
| 0.08
| 0
| 0
| 0.099649
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.22
| 0
| 0.32
| 0.16
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a880ef41f3bfd67c8ea6c85667d8aef79348500
| 1,744
|
py
|
Python
|
cinder/tests/unit/fake_group_snapshot.py
|
lightsey/cinder
|
e03d68e42e57a63f8d0f3e177fb4287290612b24
|
[
"Apache-2.0"
] | 571
|
2015-01-01T17:47:26.000Z
|
2022-03-23T07:46:36.000Z
|
cinder/tests/unit/fake_group_snapshot.py
|
vexata/cinder
|
7b84c0842b685de7ee012acec40fb4064edde5e9
|
[
"Apache-2.0"
] | 37
|
2015-01-22T23:27:04.000Z
|
2021-02-05T16:38:48.000Z
|
cinder/tests/unit/fake_group_snapshot.py
|
vexata/cinder
|
7b84c0842b685de7ee012acec40fb4064edde5e9
|
[
"Apache-2.0"
] | 841
|
2015-01-04T17:17:11.000Z
|
2022-03-31T12:06:51.000Z
|
# Copyright 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import fields
from cinder import objects
from cinder.tests.unit import fake_constants as fake
def fake_db_group_snapshot(**updates):
db_group_snapshot = {
'id': fake.GROUP_SNAPSHOT_ID,
'name': 'group-1',
'status': 'available',
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'group_type_id': fake.GROUP_TYPE_ID,
'group_id': fake.GROUP_ID,
}
for name, field in objects.GroupSnapshot.fields.items():
if name in db_group_snapshot:
continue
if field.nullable:
db_group_snapshot[name] = None
elif field.default != fields.UnspecifiedDefault:
db_group_snapshot[name] = field.default
else:
raise Exception('fake_db_group_snapshot needs help with %s.'
% name)
if updates:
db_group_snapshot.update(updates)
return db_group_snapshot
def fake_group_snapshot_obj(context, **updates):
return objects.GroupSnapshot._from_db_object(
context, objects.GroupSnapshot(), fake_db_group_snapshot(**updates))
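A hedged usage sketch: these helpers only run inside cinder's unit-test environment, since they pull field definitions from objects.GroupSnapshot and IDs from fake_constants; the context variable ctxt is assumed to exist in the surrounding test.
# Inside a cinder unit test (a RequestContext, here called ctxt, is assumed to exist):
db_snap = fake_db_group_snapshot(status='creating')
assert db_snap['status'] == 'creating'
# snap_obj = fake_group_snapshot_obj(ctxt, name='group-2')  # versioned object with overrides applied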
| 33.538462
| 78
| 0.679472
| 227
| 1,744
| 5.039648
| 0.475771
| 0.125
| 0.118007
| 0.049825
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006803
| 0.241399
| 1,744
| 51
| 79
| 34.196078
| 0.857899
| 0.332569
| 0
| 0
| 0
| 0
| 0.094159
| 0.01918
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.103448
| 0.034483
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a88b532a2c292c3b22e23456f113d6c77d67696
| 1,258
|
py
|
Python
|
src/tree_visualizer.py
|
szymanskir/msi
|
27013bac31e62b36dff138cfbb91852c96f77ef3
|
[
"MIT"
] | null | null | null |
src/tree_visualizer.py
|
szymanskir/msi
|
27013bac31e62b36dff138cfbb91852c96f77ef3
|
[
"MIT"
] | null | null | null |
src/tree_visualizer.py
|
szymanskir/msi
|
27013bac31e62b36dff138cfbb91852c96f77ef3
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
def display_resolution_tree(resolution_tree: nx.classes.DiGraph):
_draw_resolution_tree_(resolution_tree)
plt.show()
def _draw_resolution_tree_(tree: nx.classes.DiGraph, enable_edge_labels: bool = True, rotate_edge_labels: bool = False):
plt.figure()
# graph
nodes_pos = graphviz_layout(tree, prog='dot')
nx.draw(tree, nodes_pos,
node_size=150, edge_color='#7d7d7d')
# nodes labels
pos_attrs = {}
for node, coords in nodes_pos.items():
pos_attrs[node] = (coords[0], coords[1] - 10)
custom_node_attrs = {}
for node, attr in tree.nodes.items():
custom_node_attrs[node] = str(node)
nodes_bbox = dict(facecolor="w", edgecolor="#d3d3d3", pad=6, lw=0.1)
nx.draw_networkx_labels(
tree, pos_attrs, labels=custom_node_attrs, font_size=13, bbox=nodes_bbox)
# edge labels
if enable_edge_labels:
edges_pos = graphviz_layout(tree, prog='dot')
edge_labels = nx.get_edge_attributes(tree, 'transformation')
nx.draw_networkx_edge_labels(
tree, pos=edges_pos, edge_labels=edge_labels, font_size=13, rotate=rotate_edge_labels)
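A small usage sketch for display_resolution_tree, assuming Graphviz and pygraphviz are installed so graphviz_layout can run; the 'transformation' edge attribute supplies the edge labels.
import networkx as nx
# display_resolution_tree is defined in src/tree_visualizer.py above

tree = nx.DiGraph()
tree.add_edge('p | q', 'p', transformation='resolve q')
tree.add_edge('p | q', 'q', transformation='resolve p')
display_resolution_tree(tree)  # renders the tree in a matplotlib window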
| 33.105263
| 120
| 0.702703
| 177
| 1,258
| 4.689266
| 0.367232
| 0.108434
| 0.054217
| 0.06747
| 0.06747
| 0.06747
| 0
| 0
| 0
| 0
| 0
| 0.019627
| 0.189984
| 1,258
| 37
| 121
| 34
| 0.794897
| 0.023847
| 0
| 0
| 0
| 0
| 0.028595
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.12
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a8a41bb9d474871c5ea7be817390ae9d2fe8454
| 49,397
|
py
|
Python
|
tests/viz_tests.py
|
theoretical-olive/incubator-superset
|
72fc581b1559e7ce08b11c481b88eaa01b2d17de
|
[
"Apache-2.0"
] | 2
|
2020-06-29T20:02:34.000Z
|
2020-06-29T20:02:35.000Z
|
tests/viz_tests.py
|
theoretical-olive/incubator-superset
|
72fc581b1559e7ce08b11c481b88eaa01b2d17de
|
[
"Apache-2.0"
] | null | null | null |
tests/viz_tests.py
|
theoretical-olive/incubator-superset
|
72fc581b1559e7ce08b11c481b88eaa01b2d17de
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import uuid
from datetime import datetime
import logging
from math import nan
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import tests.test_app
import superset.viz as viz
from superset import app
from superset.constants import NULL_STRING
from superset.exceptions import SpatialException
from superset.utils.core import DTTM_ALIAS
from .base_tests import SupersetTestCase
from .utils import load_fixture
logger = logging.getLogger(__name__)
class BaseVizTestCase(SupersetTestCase):
def test_constructor_exception_no_datasource(self):
form_data = {}
datasource = None
with self.assertRaises(Exception):
viz.BaseViz(datasource, form_data)
def test_process_metrics(self):
# test TableViz metrics in correct order
form_data = {
"url_params": {},
"row_limit": 500,
"metric": "sum__SP_POP_TOTL",
"entity": "country_code",
"secondary_metric": "sum__SP_POP_TOTL",
"granularity_sqla": "year",
"page_length": 0,
"all_columns": [],
"viz_type": "table",
"since": "2014-01-01",
"until": "2014-01-02",
"metrics": ["sum__SP_POP_TOTL", "SUM(SE_PRM_NENR_MA)", "SUM(SP_URB_TOTL)"],
"country_fieldtype": "cca3",
"percent_metrics": ["count"],
"slice_id": 74,
"time_grain_sqla": None,
"order_by_cols": [],
"groupby": ["country_name"],
"compare_lag": "10",
"limit": "25",
"datasource": "2__table",
"table_timestamp_format": "%Y-%m-%d %H:%M:%S",
"markup_type": "markdown",
"where": "",
"compare_suffix": "o10Y",
}
datasource = Mock()
datasource.type = "table"
test_viz = viz.BaseViz(datasource, form_data)
expect_metric_labels = [
u"sum__SP_POP_TOTL",
u"SUM(SE_PRM_NENR_MA)",
u"SUM(SP_URB_TOTL)",
u"count",
]
self.assertEqual(test_viz.metric_labels, expect_metric_labels)
self.assertEqual(test_viz.all_metrics, expect_metric_labels)
def test_get_df_returns_empty_df(self):
form_data = {"dummy": 123}
query_obj = {"granularity": "day"}
datasource = self.get_datasource_mock()
test_viz = viz.BaseViz(datasource, form_data)
result = test_viz.get_df(query_obj)
self.assertEqual(type(result), pd.DataFrame)
self.assertTrue(result.empty)
def test_get_df_handles_dttm_col(self):
form_data = {"dummy": 123}
query_obj = {"granularity": "day"}
results = Mock()
results.query = Mock()
results.status = Mock()
results.error_message = Mock()
datasource = Mock()
datasource.type = "table"
datasource.query = Mock(return_value=results)
mock_dttm_col = Mock()
datasource.get_column = Mock(return_value=mock_dttm_col)
test_viz = viz.BaseViz(datasource, form_data)
test_viz.df_metrics_to_num = Mock()
test_viz.get_fillna_for_columns = Mock(return_value=0)
results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01 05:00:00"]})
datasource.offset = 0
mock_dttm_col = Mock()
datasource.get_column = Mock(return_value=mock_dttm_col)
mock_dttm_col.python_date_format = "epoch_ms"
result = test_viz.get_df(query_obj)
import logging
logger.info(result)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
)
mock_dttm_col.python_date_format = None
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
)
datasource.offset = 1
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 6, 0)], name=DTTM_ALIAS)
)
datasource.offset = 0
results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01"]})
mock_dttm_col.python_date_format = "%Y-%m-%d"
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 0, 0)], name=DTTM_ALIAS)
)
def test_cache_timeout(self):
datasource = self.get_datasource_mock()
datasource.cache_timeout = 0
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(0, test_viz.cache_timeout)
datasource.cache_timeout = 156
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(156, test_viz.cache_timeout)
datasource.cache_timeout = None
datasource.database.cache_timeout = 0
self.assertEqual(0, test_viz.cache_timeout)
datasource.database.cache_timeout = 1666
self.assertEqual(1666, test_viz.cache_timeout)
datasource.database.cache_timeout = None
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(app.config["CACHE_DEFAULT_TIMEOUT"], test_viz.cache_timeout)
class TableVizTestCase(SupersetTestCase):
def test_get_data_applies_percentage(self):
form_data = {
"groupby": ["groupA", "groupB"],
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"count",
"avg__C",
],
"percent_metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"avg__B",
],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"SUM(value1)": [15, 20, 25, 40],
"avg__B": [10, 20, 5, 15],
"avg__C": [11, 22, 33, 44],
"count": [6, 7, 8, 9],
"groupA": ["A", "B", "C", "C"],
"groupB": ["x", "x", "y", "z"],
}
)
test_viz = viz.TableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data and computes percents
self.assertEqual(
[
"groupA",
"groupB",
"SUM(value1)",
"count",
"avg__C",
"%SUM(value1)",
"%avg__B",
],
list(data["columns"]),
)
expected = [
{
"groupA": "A",
"groupB": "x",
"SUM(value1)": 15,
"count": 6,
"avg__C": 11,
"%SUM(value1)": 0.15,
"%avg__B": 0.2,
},
{
"groupA": "B",
"groupB": "x",
"SUM(value1)": 20,
"count": 7,
"avg__C": 22,
"%SUM(value1)": 0.2,
"%avg__B": 0.4,
},
{
"groupA": "C",
"groupB": "y",
"SUM(value1)": 25,
"count": 8,
"avg__C": 33,
"%SUM(value1)": 0.25,
"%avg__B": 0.1,
},
{
"groupA": "C",
"groupB": "z",
"SUM(value1)": 40,
"count": 9,
"avg__C": 44,
"%SUM(value1)": 0.4,
"%avg__B": 0.3,
},
]
self.assertEqual(expected, data["records"])
def test_parse_adhoc_filters(self):
form_data = {
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
],
"adhoc_filters": [
{
"expressionType": "SIMPLE",
"clause": "WHERE",
"subject": "value2",
"operator": ">",
"comparator": "100",
},
{
"expressionType": "SIMPLE",
"clause": "HAVING",
"subject": "SUM(value1)",
"operator": "<",
"comparator": "10",
},
{
"expressionType": "SQL",
"clause": "HAVING",
"sqlExpression": "SUM(value1) > 5",
},
{
"expressionType": "SQL",
"clause": "WHERE",
"sqlExpression": "value3 in ('North America')",
},
],
}
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
[{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
)
self.assertEqual(
[{"op": "<", "val": "10", "col": "SUM(value1)"}],
query_obj["extras"]["having_druid"],
)
self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
self.assertEqual("(SUM(value1) > 5)", query_obj["extras"]["having"])
def test_adhoc_filters_overwrite_legacy_filters(self):
form_data = {
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
],
"adhoc_filters": [
{
"expressionType": "SIMPLE",
"clause": "WHERE",
"subject": "value2",
"operator": ">",
"comparator": "100",
},
{
"expressionType": "SQL",
"clause": "WHERE",
"sqlExpression": "value3 in ('North America')",
},
],
"having": "SUM(value1) > 5",
}
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
[{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
)
self.assertEqual([], query_obj["extras"]["having_druid"])
self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
self.assertEqual("", query_obj["extras"]["having"])
def test_query_obj_merges_percent_metrics(self):
datasource = self.get_datasource_mock()
form_data = {
"metrics": ["sum__A", "count", "avg__C"],
"percent_metrics": ["sum__A", "avg__B", "max__Y"],
}
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
["sum__A", "count", "avg__C", "avg__B", "max__Y"], query_obj["metrics"]
)
def test_query_obj_throws_columns_and_metrics(self):
datasource = self.get_datasource_mock()
form_data = {"all_columns": ["A", "B"], "metrics": ["x", "y"]}
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.query_obj()
del form_data["metrics"]
form_data["groupby"] = ["B", "C"]
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.query_obj()
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_merges_all_columns(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {
"all_columns": ["colA", "colB", "colC"],
"order_by_cols": ['["colA", "colB"]', '["colC"]'],
}
super_query_obj.return_value = {
"columns": ["colD", "colC"],
"groupby": ["colA", "colB"],
}
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(form_data["all_columns"], query_obj["columns"])
self.assertEqual([], query_obj["groupby"])
self.assertEqual([["colA", "colB"], ["colC"]], query_obj["orderby"])
def test_query_obj_uses_sortby(self):
datasource = self.get_datasource_mock()
form_data = {
"metrics": ["colA", "colB"],
"order_desc": False,
}
def run_test(metric):
form_data["timeseries_limit_metric"] = metric
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(["colA", "colB", metric], query_obj["metrics"])
self.assertEqual([(metric, True)], query_obj["orderby"])
run_test("simple_metric")
run_test(
{
"label": "adhoc_metric",
"expressionType": "SIMPLE",
"aggregate": "SUM",
"column": {"column_name": "sort_column",},
}
)
def test_should_be_timeseries_raises_when_no_granularity(self):
datasource = self.get_datasource_mock()
form_data = {"include_time": True}
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.should_be_timeseries()
def test_adhoc_metric_with_sortby(self):
metrics = [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "sum_value",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
]
form_data = {
"metrics": metrics,
"timeseries_limit_metric": {
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"order_desc": False,
}
df = pd.DataFrame({"SUM(value1)": [15], "sum_value": [15]})
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
data = test_viz.get_data(df)
self.assertEqual(["sum_value"], data["columns"])
class DistBarVizTestCase(SupersetTestCase):
def test_groupby_nulls(self):
form_data = {
"metrics": ["votes"],
"adhoc_filters": [],
"groupby": ["toppings"],
"columns": [],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"toppings": ["cheese", "pepperoni", "anchovies", None],
"votes": [3, 5, 1, 2],
}
)
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)[0]
self.assertEqual("votes", data["key"])
expected_values = [
{"x": "pepperoni", "y": 5},
{"x": "cheese", "y": 3},
{"x": NULL_STRING, "y": 2},
{"x": "anchovies", "y": 1},
]
self.assertEqual(expected_values, data["values"])
def test_groupby_nans(self):
form_data = {
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["beds"],
"columns": [],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame({"beds": [0, 1, nan, 2], "count": [30, 42, 3, 29]})
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)[0]
self.assertEqual("count", data["key"])
expected_values = [
{"x": "1.0", "y": 42},
{"x": "0.0", "y": 30},
{"x": "2.0", "y": 29},
{"x": NULL_STRING, "y": 3},
]
self.assertEqual(expected_values, data["values"])
def test_column_nulls(self):
form_data = {
"metrics": ["votes"],
"adhoc_filters": [],
"groupby": ["toppings"],
"columns": ["role"],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"toppings": ["cheese", "pepperoni", "cheese", "pepperoni"],
"role": ["engineer", "engineer", None, None],
"votes": [3, 5, 1, 2],
}
)
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)
expected = [
{
"key": NULL_STRING,
"values": [{"x": "pepperoni", "y": 2}, {"x": "cheese", "y": 1}],
},
{
"key": "engineer",
"values": [{"x": "pepperoni", "y": 5}, {"x": "cheese", "y": 3}],
},
]
self.assertEqual(expected, data)
class PairedTTestTestCase(SupersetTestCase):
def test_get_data_transforms_dataframe(self):
form_data = {
"groupby": ["groupA", "groupB", "groupC"],
"metrics": ["metric1", "metric2", "metric3"],
}
datasource = self.get_datasource_mock()
# Test data
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
data = pairedTTestViz.get_data(df)
# Check method correctly transforms data
expected = {
"metric1": [
{
"values": [
{"x": 100, "y": 1},
{"x": 200, "y": 2},
{"x": 300, "y": 3},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 4},
{"x": 200, "y": 5},
{"x": 300, "y": 6},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 7},
{"x": 200, "y": 8},
{"x": 300, "y": 9},
],
"group": ("c1", "c2", "c3"),
},
],
"metric2": [
{
"values": [
{"x": 100, "y": 10},
{"x": 200, "y": 20},
{"x": 300, "y": 30},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 40},
{"x": 200, "y": 50},
{"x": 300, "y": 60},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 70},
{"x": 200, "y": 80},
{"x": 300, "y": 90},
],
"group": ("c1", "c2", "c3"),
},
],
"metric3": [
{
"values": [
{"x": 100, "y": 100},
{"x": 200, "y": 200},
{"x": 300, "y": 300},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 400},
{"x": 200, "y": 500},
{"x": 300, "y": 600},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 700},
{"x": 200, "y": 800},
{"x": 300, "y": 900},
],
"group": ("c1", "c2", "c3"),
},
],
}
self.assertEqual(data, expected)
def test_get_data_empty_null_keys(self):
form_data = {"groupby": [], "metrics": ["", None]}
datasource = self.get_datasource_mock()
# Test data
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300]
raw[""] = [1, 2, 3]
raw[None] = [10, 20, 30]
df = pd.DataFrame(raw)
pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
data = pairedTTestViz.get_data(df)
# Check method correctly transforms data
expected = {
"N/A": [
{
"values": [
{"x": 100, "y": 1},
{"x": 200, "y": 2},
{"x": 300, "y": 3},
],
"group": "All",
}
],
"NULL": [
{
"values": [
{"x": 100, "y": 10},
{"x": 200, "y": 20},
{"x": 300, "y": 30},
],
"group": "All",
}
],
}
self.assertEqual(data, expected)
class PartitionVizTestCase(SupersetTestCase):
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_time_series_option(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {}
test_viz = viz.PartitionViz(datasource, form_data)
super_query_obj.return_value = {}
query_obj = test_viz.query_obj()
self.assertFalse(query_obj["is_timeseries"])
test_viz.form_data["time_series_option"] = "agg_sum"
query_obj = test_viz.query_obj()
self.assertTrue(query_obj["is_timeseries"])
def test_levels_for_computes_levels(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
time_op = "agg_sum"
test_viz = viz.PartitionViz(Mock(), {})
levels = test_viz.levels_for(time_op, groups, df)
self.assertEqual(4, len(levels))
expected = {DTTM_ALIAS: 1800, "metric1": 45, "metric2": 450, "metric3": 4500}
self.assertEqual(expected, levels[0].to_dict())
expected = {
DTTM_ALIAS: {"a1": 600, "b1": 600, "c1": 600},
"metric1": {"a1": 6, "b1": 15, "c1": 24},
"metric2": {"a1": 60, "b1": 150, "c1": 240},
"metric3": {"a1": 600, "b1": 1500, "c1": 2400},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(["groupA", "groupB"], levels[2].index.names)
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
time_op = "agg_mean"
levels = test_viz.levels_for(time_op, groups, df)
self.assertEqual(4, len(levels))
expected = {
DTTM_ALIAS: 200.0,
"metric1": 5.0,
"metric2": 50.0,
"metric3": 500.0,
}
self.assertEqual(expected, levels[0].to_dict())
expected = {
DTTM_ALIAS: {"a1": 200, "c1": 200, "b1": 200},
"metric1": {"a1": 2, "b1": 5, "c1": 8},
"metric2": {"a1": 20, "b1": 50, "c1": 80},
"metric3": {"a1": 200, "b1": 500, "c1": 800},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(["groupA", "groupB"], levels[2].index.names)
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
def test_levels_for_diff_computes_difference(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
test_viz = viz.PartitionViz(Mock(), {})
time_op = "point_diff"
levels = test_viz.levels_for_diff(time_op, groups, df)
expected = {"metric1": 6, "metric2": 60, "metric3": 600}
self.assertEqual(expected, levels[0].to_dict())
expected = {
"metric1": {"a1": 2, "b1": 2, "c1": 2},
"metric2": {"a1": 20, "b1": 20, "c1": 20},
"metric3": {"a1": 200, "b1": 200, "c1": 200},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(4, len(levels))
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
def test_levels_for_time_calls_process_data_and_drops_cols(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
test_viz = viz.PartitionViz(Mock(), {"groupby": groups})
def return_args(df_drop, aggregate):
return df_drop
test_viz.process_data = Mock(side_effect=return_args)
levels = test_viz.levels_for_time(groups, df)
self.assertEqual(4, len(levels))
cols = [DTTM_ALIAS, "metric1", "metric2", "metric3"]
self.assertEqual(sorted(cols), sorted(levels[0].columns.tolist()))
cols += ["groupA"]
self.assertEqual(sorted(cols), sorted(levels[1].columns.tolist()))
cols += ["groupB"]
self.assertEqual(sorted(cols), sorted(levels[2].columns.tolist()))
cols += ["groupC"]
self.assertEqual(sorted(cols), sorted(levels[3].columns.tolist()))
self.assertEqual(4, len(test_viz.process_data.mock_calls))
def test_nest_values_returns_hierarchy(self):
raw = {}
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
test_viz = viz.PartitionViz(Mock(), {})
groups = ["groupA", "groupB", "groupC"]
levels = test_viz.levels_for("agg_sum", groups, df)
nest = test_viz.nest_values(levels)
self.assertEqual(3, len(nest))
for i in range(0, 3):
self.assertEqual("metric" + str(i + 1), nest[i]["name"])
self.assertEqual(3, len(nest[0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"][0]["children"]))
def test_nest_procs_returns_hierarchy(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
test_viz = viz.PartitionViz(Mock(), {})
groups = ["groupA", "groupB", "groupC"]
metrics = ["metric1", "metric2", "metric3"]
procs = {}
for i in range(0, 4):
df_drop = df.drop(groups[i:], 1)
pivot = df_drop.pivot_table(
index=DTTM_ALIAS, columns=groups[:i], values=metrics
)
procs[i] = pivot
nest = test_viz.nest_procs(procs)
self.assertEqual(3, len(nest))
for i in range(0, 3):
self.assertEqual("metric" + str(i + 1), nest[i]["name"])
self.assertEqual(None, nest[i].get("val"))
self.assertEqual(3, len(nest[0]["children"]))
self.assertEqual(3, len(nest[0]["children"][0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"][0]["children"]))
self.assertEqual(
1, len(nest[0]["children"][0]["children"][0]["children"][0]["children"])
)
def test_get_data_calls_correct_method(self):
test_viz = viz.PartitionViz(Mock(), {})
df = Mock()
with self.assertRaises(ValueError):
test_viz.get_data(df)
test_viz.levels_for = Mock(return_value=1)
test_viz.nest_values = Mock(return_value=1)
test_viz.form_data["groupby"] = ["groups"]
test_viz.form_data["time_series_option"] = "not_time"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[0][1][0])
test_viz.form_data["time_series_option"] = "agg_sum"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[1][1][0])
test_viz.form_data["time_series_option"] = "agg_mean"
test_viz.get_data(df)
self.assertEqual("agg_mean", test_viz.levels_for.mock_calls[2][1][0])
test_viz.form_data["time_series_option"] = "point_diff"
test_viz.levels_for_diff = Mock(return_value=1)
test_viz.get_data(df)
self.assertEqual("point_diff", test_viz.levels_for_diff.mock_calls[0][1][0])
test_viz.form_data["time_series_option"] = "point_percent"
test_viz.get_data(df)
self.assertEqual("point_percent", test_viz.levels_for_diff.mock_calls[1][1][0])
test_viz.form_data["time_series_option"] = "point_factor"
test_viz.get_data(df)
self.assertEqual("point_factor", test_viz.levels_for_diff.mock_calls[2][1][0])
test_viz.levels_for_time = Mock(return_value=1)
test_viz.nest_procs = Mock(return_value=1)
test_viz.form_data["time_series_option"] = "adv_anal"
test_viz.get_data(df)
self.assertEqual(1, len(test_viz.levels_for_time.mock_calls))
self.assertEqual(1, len(test_viz.nest_procs.mock_calls))
test_viz.form_data["time_series_option"] = "time_series"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[3][1][0])
self.assertEqual(7, len(test_viz.nest_values.mock_calls))
class RoseVisTestCase(SupersetTestCase):
def test_rose_vis_get_data(self):
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
t3 = pd.Timestamp("2004")
raw[DTTM_ALIAS] = [t1, t2, t3, t1, t2, t3, t1, t2, t3]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
df = pd.DataFrame(raw)
fd = {"metrics": ["metric1"], "groupby": ["groupA"]}
test_viz = viz.RoseViz(Mock(), fd)
test_viz.metrics = fd["metrics"]
res = test_viz.get_data(df)
expected = {
946684800000000000: [
{"time": t1, "value": 1, "key": ("a1",), "name": ("a1",)},
{"time": t1, "value": 4, "key": ("b1",), "name": ("b1",)},
{"time": t1, "value": 7, "key": ("c1",), "name": ("c1",)},
],
1009843200000000000: [
{"time": t2, "value": 2, "key": ("a1",), "name": ("a1",)},
{"time": t2, "value": 5, "key": ("b1",), "name": ("b1",)},
{"time": t2, "value": 8, "key": ("c1",), "name": ("c1",)},
],
1072915200000000000: [
{"time": t3, "value": 3, "key": ("a1",), "name": ("a1",)},
{"time": t3, "value": 6, "key": ("b1",), "name": ("b1",)},
{"time": t3, "value": 9, "key": ("c1",), "name": ("c1",)},
],
}
self.assertEqual(expected, res)
class TimeSeriesTableVizTestCase(SupersetTestCase):
def test_get_data_metrics(self):
form_data = {"metrics": ["sum__A", "count"], "groupby": []}
datasource = self.get_datasource_mock()
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
raw[DTTM_ALIAS] = [t1, t2]
raw["sum__A"] = [15, 20]
raw["count"] = [6, 7]
df = pd.DataFrame(raw)
test_viz = viz.TimeTableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data
self.assertEqual(set(["count", "sum__A"]), set(data["columns"]))
time_format = "%Y-%m-%d %H:%M:%S"
expected = {
t1.strftime(time_format): {"sum__A": 15, "count": 6},
t2.strftime(time_format): {"sum__A": 20, "count": 7},
}
self.assertEqual(expected, data["records"])
def test_get_data_group_by(self):
form_data = {"metrics": ["sum__A"], "groupby": ["groupby1"]}
datasource = self.get_datasource_mock()
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
raw[DTTM_ALIAS] = [t1, t1, t1, t2, t2, t2]
raw["sum__A"] = [15, 20, 25, 30, 35, 40]
raw["groupby1"] = ["a1", "a2", "a3", "a1", "a2", "a3"]
df = pd.DataFrame(raw)
test_viz = viz.TimeTableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data
self.assertEqual(set(["a1", "a2", "a3"]), set(data["columns"]))
time_format = "%Y-%m-%d %H:%M:%S"
expected = {
t1.strftime(time_format): {"a1": 15, "a2": 20, "a3": 25},
t2.strftime(time_format): {"a1": 30, "a2": 35, "a3": 40},
}
self.assertEqual(expected, data["records"])
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_throws_metrics_and_groupby(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {"groupby": ["a"]}
super_query_obj.return_value = {}
test_viz = viz.TimeTableViz(datasource, form_data)
with self.assertRaises(Exception):
test_viz.query_obj()
form_data["metrics"] = ["x", "y"]
test_viz = viz.TimeTableViz(datasource, form_data)
with self.assertRaises(Exception):
test_viz.query_obj()
class BaseDeckGLVizTestCase(SupersetTestCase):
def test_get_metrics(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_metrics()
assert result == [form_data.get("size")]
form_data = {}
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_metrics()
assert result == []
def test_scatterviz_get_metrics(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
form_data = {}
test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
test_viz_deckgl.point_radius_fixed = {"type": "metric", "value": "int"}
result = test_viz_deckgl.get_metrics()
assert result == ["int"]
form_data = {}
test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
test_viz_deckgl.point_radius_fixed = {}
result = test_viz_deckgl.get_metrics()
assert result == []
def test_get_js_columns(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
mock_d = {"a": "dummy1", "b": "dummy2", "c": "dummy3"}
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_js_columns(mock_d)
assert result == {"color": None}
def test_get_properties(self):
mock_d = {}
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(NotImplementedError) as context:
test_viz_deckgl.get_properties(mock_d)
self.assertTrue("" in str(context.exception))
def test_process_spatial_query_obj(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
mock_key = "spatial_key"
mock_gb = []
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(ValueError) as context:
test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
self.assertTrue("Bad spatial key" in str(context.exception))
test_form_data = {
"latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"},
"delimited_key": {"type": "delimited", "lonlatCol": "lonlat"},
"geohash_key": {"type": "geohash", "geohashCol": "geo"},
}
datasource = self.get_datasource_mock()
expected_results = {
"latlong_key": ["lon", "lat"],
"delimited_key": ["lonlat"],
"geohash_key": ["geo"],
}
for mock_key in ["latlong_key", "delimited_key", "geohash_key"]:
mock_gb = []
test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data)
test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
assert expected_results.get(mock_key) == mock_gb
def test_geojson_query_obj(self):
form_data = load_fixture("deck_geojson_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.DeckGeoJson(datasource, form_data)
results = test_viz_deckgl.query_obj()
assert results["metrics"] == []
assert results["groupby"] == []
assert results["columns"] == ["test_col"]
def test_parse_coordinates(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
viz_instance = viz.BaseDeckGLViz(datasource, form_data)
coord = viz_instance.parse_coordinates("1.23, 3.21")
self.assertEqual(coord, (1.23, 3.21))
coord = viz_instance.parse_coordinates("1.23 3.21")
self.assertEqual(coord, (1.23, 3.21))
self.assertEqual(viz_instance.parse_coordinates(None), None)
self.assertEqual(viz_instance.parse_coordinates(""), None)
def test_parse_coordinates_raises(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(SpatialException):
test_viz_deckgl.parse_coordinates("NULL")
with self.assertRaises(SpatialException):
test_viz_deckgl.parse_coordinates("fldkjsalkj,fdlaskjfjadlksj")
@patch("superset.utils.core.uuid.uuid4")
def test_filter_nulls(self, mock_uuid4):
mock_uuid4.return_value = uuid.UUID("12345678123456781234567812345678")
test_form_data = {
"latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"},
"delimited_key": {"type": "delimited", "lonlatCol": "lonlat"},
"geohash_key": {"type": "geohash", "geohashCol": "geo"},
}
datasource = self.get_datasource_mock()
expected_results = {
"latlong_key": [
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "lat",
"isExtra": False,
},
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "lon",
"isExtra": False,
},
],
"delimited_key": [
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "lonlat",
"isExtra": False,
}
],
"geohash_key": [
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "geo",
"isExtra": False,
}
],
}
for mock_key in ["latlong_key", "delimited_key", "geohash_key"]:
test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data.copy())
test_viz_deckgl.spatial_control_keys = [mock_key]
test_viz_deckgl.add_null_filters()
adhoc_filters = test_viz_deckgl.form_data["adhoc_filters"]
assert expected_results.get(mock_key) == adhoc_filters
class TimeSeriesVizTestCase(SupersetTestCase):
def test_timeseries_unicode_data(self):
datasource = self.get_datasource_mock()
form_data = {"groupby": ["name"], "metrics": ["sum__payout"]}
raw = {}
raw["name"] = [
"Real Madrid C.F.🇺🇸🇬🇧",
"Real Madrid C.F.🇺🇸🇬🇧",
"Real Madrid Basket",
"Real Madrid Basket",
]
raw["__timestamp"] = [
"2018-02-20T00:00:00",
"2018-03-09T00:00:00",
"2018-02-20T00:00:00",
"2018-03-09T00:00:00",
]
raw["sum__payout"] = [2, 2, 4, 4]
df = pd.DataFrame(raw)
test_viz = viz.NVD3TimeSeriesViz(datasource, form_data)
viz_data = {}
viz_data = test_viz.get_data(df)
expected = [
{
u"values": [
{u"y": 4, u"x": u"2018-02-20T00:00:00"},
{u"y": 4, u"x": u"2018-03-09T00:00:00"},
],
u"key": (u"Real Madrid Basket",),
},
{
u"values": [
{u"y": 2, u"x": u"2018-02-20T00:00:00"},
{u"y": 2, u"x": u"2018-03-09T00:00:00"},
],
u"key": (u"Real Madrid C.F.\U0001f1fa\U0001f1f8\U0001f1ec\U0001f1e7",),
},
]
self.assertEqual(expected, viz_data)
def test_process_data_resample(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"__timestamp": pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
"y": [1.0, 2.0, 5.0, 7.0],
}
)
self.assertEqual(
viz.NVD3TimeSeriesViz(
datasource,
{"metrics": ["y"], "resample_method": "sum", "resample_rule": "1D"},
)
.process_data(df)["y"]
.tolist(),
[1.0, 2.0, 0.0, 0.0, 5.0, 0.0, 7.0],
)
np.testing.assert_equal(
viz.NVD3TimeSeriesViz(
datasource,
{"metrics": ["y"], "resample_method": "asfreq", "resample_rule": "1D"},
)
.process_data(df)["y"]
.tolist(),
[1.0, 2.0, np.nan, np.nan, 5.0, np.nan, 7.0],
)
def test_apply_rolling(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
index=pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
data={"y": [1.0, 2.0, 3.0, 4.0]},
)
self.assertEqual(
viz.BigNumberViz(
datasource,
{
"metrics": ["y"],
"rolling_type": "cumsum",
"rolling_periods": 0,
"min_periods": 0,
},
)
.apply_rolling(df)["y"]
.tolist(),
[1.0, 3.0, 6.0, 10.0],
)
self.assertEqual(
viz.BigNumberViz(
datasource,
{
"metrics": ["y"],
"rolling_type": "sum",
"rolling_periods": 2,
"min_periods": 0,
},
)
.apply_rolling(df)["y"]
.tolist(),
[1.0, 3.0, 5.0, 7.0],
)
self.assertEqual(
viz.BigNumberViz(
datasource,
{
"metrics": ["y"],
"rolling_type": "mean",
"rolling_periods": 10,
"min_periods": 0,
},
)
.apply_rolling(df)["y"]
.tolist(),
[1.0, 1.5, 2.0, 2.5],
)
class BigNumberVizTestCase(SupersetTestCase):
def test_get_data(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
data={
DTTM_ALIAS: pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
"y": [1.0, 2.0, 3.0, 4.0],
}
)
data = viz.BigNumberViz(datasource, {"metrics": ["y"]}).get_data(df)
self.assertEqual(data[2], {DTTM_ALIAS: pd.Timestamp("2019-01-05"), "y": 3})
def test_get_data_with_none(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
data={
DTTM_ALIAS: pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
"y": [1.0, 2.0, None, 4.0],
}
)
data = viz.BigNumberViz(datasource, {"metrics": ["y"]}).get_data(df)
assert np.isnan(data[2]["y"])
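A worked sketch of the percent-metric arithmetic that test_get_data_applies_percentage exercises: each %metric entry is the row's share of that column's total.
import pandas as pd

df = pd.DataFrame({"SUM(value1)": [15, 20, 25, 40], "avg__B": [10, 20, 5, 15]})
print((df["SUM(value1)"] / df["SUM(value1)"].sum()).tolist())  # [0.15, 0.2, 0.25, 0.4]
print((df["avg__B"] / df["avg__B"].sum()).tolist())            # [0.2, 0.4, 0.1, 0.3]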
| 38.381507
| 88
| 0.494099
| 5,310
| 49,397
| 4.389831
| 0.09774
| 0.041441
| 0.029344
| 0.040541
| 0.676877
| 0.630459
| 0.596139
| 0.550536
| 0.508709
| 0.459502
| 0
| 0.067229
| 0.342956
| 49,397
| 1,286
| 89
| 38.411353
| 0.650727
| 0.021135
| 0
| 0.475494
| 0
| 0
| 0.16049
| 0.01345
| 0
| 0
| 0
| 0
| 0.104041
| 1
| 0.039553
| false
| 0
| 0.013758
| 0.00086
| 0.062769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a8a7f44a0585244eb2c07e0db4cb782cb9fe0fb
| 1,840
|
py
|
Python
|
chord_sim/modules/taskqueue.py
|
ryogrid/FunnelKVS
|
65c4308ce6e08b819b5396fc1aa658468c276362
|
[
"MIT"
] | 8
|
2022-01-12T00:46:25.000Z
|
2022-03-30T12:00:52.000Z
|
chord_sim/modules/taskqueue.py
|
ryogrid/FunnelKVS
|
65c4308ce6e08b819b5396fc1aa658468c276362
|
[
"MIT"
] | null | null | null |
chord_sim/modules/taskqueue.py
|
ryogrid/FunnelKVS
|
65c4308ce6e08b819b5396fc1aa658468c276362
|
[
"MIT"
] | 1
|
2022-01-12T06:22:31.000Z
|
2022-01-12T06:22:31.000Z
|
# coding:utf-8
from typing import Dict, List, Optional, cast, TYPE_CHECKING
from .chord_util import ChordUtil, InternalControlFlowException, NodeIsDownedExceptiopn
if TYPE_CHECKING:
from .chord_node import ChordNode
class TaskQueue:
JOIN_PARTIAL = "join_partial"
def __init__(self, existing_node : 'ChordNode'):
self.tqueue : List[str] = []
self.existing_node = existing_node
def append_task(self, task_code : str):
self.tqueue.append(task_code)
# Execute the first task in the queue.
# If the operation fails, the task is put back at the head of the queue.
def exec_first(self):
if len(self.tqueue) > 0:
ChordUtil.dprint("exec_first_0," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + "," + str(self.tqueue))
task_code : str = self.tqueue.pop()
if task_code == TaskQueue.JOIN_PARTIAL:
# try:
#self.existing_node.stabilizer.partial_join_op()
ret = self.existing_node.stabilizer.partial_join_op()
if (ret.is_ok):
pass
else: # ret.err_code == ErrorCode.InternalControlFlowException_CODE
# The run failed, so put the task back at the head of the queue so it gets retried
self.tqueue.insert(0, task_code)
ChordUtil.dprint(
"exec_first_1," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ "INTERNAL_CONTROL_FLOW_EXCEPTION_OCCURED")
# except (InternalControlFlowException, NodeIsDownedExceptiopn):
# # The run failed, so put the task back at the head of the queue so it gets retried
# self.tqueue.insert(0, task_code)
# ChordUtil.dprint("exec_first_1," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
# + "INTERNAL_CONTROL_FLOW_EXCEPTION_OCCURED")
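A dependency-free sketch of the retry pattern in exec_first: pop a task, and if it fails, insert it back at index 0 so it is attempted again first; run() here is a hypothetical stand-in for partial_join_op.
tqueue = ["join_partial"]

def run(task_code):
    return False  # pretend the partial join failed this time

if len(tqueue) > 0:
    task_code = tqueue.pop()
    if not run(task_code):
        tqueue.insert(0, task_code)  # failed: back to the head for the next attempt
print(tqueue)  # ['join_partial']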
| 41.818182
| 134
| 0.613587
| 189
| 1,840
| 5.640212
| 0.338624
| 0.090056
| 0.105066
| 0.067542
| 0.450281
| 0.410882
| 0.410882
| 0.337711
| 0.337711
| 0.337711
| 0
| 0.005414
| 0.297283
| 1,840
| 43
| 135
| 42.790698
| 0.819026
| 0.259239
| 0
| 0
| 0
| 0
| 0.065234
| 0.02891
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.041667
| 0.125
| 0
| 0.333333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a8f1638c1d0ec4d963f02a274edb9bb4662cfb2
| 679
|
py
|
Python
|
programs/buck_logging.py
|
lakshmi2005/buck
|
012a59d5d2e5a45b483e85fb190d2b67ea0c56ab
|
[
"Apache-2.0"
] | 1
|
2018-02-28T06:26:56.000Z
|
2018-02-28T06:26:56.000Z
|
programs/buck_logging.py
|
lakshmi2005/buck
|
012a59d5d2e5a45b483e85fb190d2b67ea0c56ab
|
[
"Apache-2.0"
] | 1
|
2018-12-10T15:54:22.000Z
|
2018-12-10T19:30:37.000Z
|
programs/buck_logging.py
|
lakshmi2005/buck
|
012a59d5d2e5a45b483e85fb190d2b67ea0c56ab
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import logging
import os
def setup_logging():
# Set log level of the messages to show.
level_name = os.environ.get('BUCK_WRAPPER_LOG_LEVEL', 'INFO')
level_name_to_level = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET,
}
level = level_name_to_level.get(level_name.upper(), logging.INFO)
logging.basicConfig(
level=level,
format=(
'%(asctime)s [%(levelname)s][%(filename)s:%(lineno)d] %(message)s'
))
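A short usage sketch, assuming setup_logging is importable from programs/buck_logging.py; the level is taken from the BUCK_WRAPPER_LOG_LEVEL environment variable.
import logging
import os

os.environ['BUCK_WRAPPER_LOG_LEVEL'] = 'DEBUG'
setup_logging()                        # configures the root logger format and level
logging.debug('wrapper starting up')   # visible because the level is DEBUG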
| 27.16
| 78
| 0.624448
| 82
| 679
| 4.963415
| 0.512195
| 0.088452
| 0.054054
| 0.078624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.240059
| 679
| 24
| 79
| 28.291667
| 0.78876
| 0.086892
| 0
| 0
| 0
| 0.052632
| 0.202265
| 0.100324
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.210526
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a8f6e13baf9b1f8e5a29dd8f55cd71b2926de6a
| 1,151
|
py
|
Python
|
CONTENT/DS-n-Algos/ALGO/__PYTHON/celeb.py
|
Web-Dev-Collaborative/DS-ALGO-OFFICIAL
|
6d7195d33c28a0fe22f12231efffb39f4bf05c97
|
[
"Apache-2.0"
] | 11
|
2021-02-18T04:53:44.000Z
|
2022-01-16T10:57:39.000Z
|
CONTENT/DS-n-Algos/ALGO/__PYTHON/celeb.py
|
Web-Dev-Collaborative/DS-ALGO-OFFICIAL
|
6d7195d33c28a0fe22f12231efffb39f4bf05c97
|
[
"Apache-2.0"
] | 162
|
2021-03-09T01:52:11.000Z
|
2022-03-12T01:09:07.000Z
|
CONTENT/DS-n-Algos/ALGO/__PYTHON/celeb.py
|
Web-Dev-Collaborative/DS-ALGO-OFFICIAL
|
6d7195d33c28a0fe22f12231efffb39f4bf05c97
|
[
"Apache-2.0"
] | 8
|
2021-02-18T05:12:34.000Z
|
2022-03-06T19:02:14.000Z
|
def orangesRotting(elemnts):
if not elemnts or len(elemnts) == 0:
return 0
n = len(elemnts)
m = len(elemnts[0])
rotten = []
for i in range(n):
for j in range(m):
if elemnts[i][j] == 2:
rotten.append((i, j))
mins = 0
def dfs(rotten):
count = []
for i, j in rotten:
# Check each of the four grid neighbours; infect fresh oranges and record them.
if i > 0 and elemnts[i - 1][j] == 1:
count.append((i - 1, j))
elemnts[i - 1][j] = 2
if j > 0 and elemnts[i][j - 1] == 1:
count.append((i, j - 1))
elemnts[i][j - 1] = 2
if i < n - 1 and elemnts[i + 1][j] == 1:
count.append((i + 1, j))
elemnts[i + 1][j] = 2
if j < m - 1 and elemnts[i][j + 1] == 1:
count.append((i, j + 1))
elemnts[i][j + 1] = 2
return count
while rotten:
rotten = dfs(rotten)
if not rotten:
break
mins += 1
for i in range(n):
for j in range(m):
if elemnts[i][j] == 1:
return -1
return mins
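A quick check on the classic rotting-oranges grids, assuming the corrected neighbour checks above: the answer is the number of breadth-first rounds, or -1 if a fresh orange is unreachable.
print(orangesRotting([[2, 1, 1],
                      [1, 1, 0],
                      [0, 1, 1]]))  # 4 -- every orange rots after four rounds

print(orangesRotting([[2, 1, 1],
                      [0, 1, 1],
                      [1, 0, 1]]))  # -1 -- the bottom-left orange can never rot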
| 25.021739
| 48
| 0.38662
| 154
| 1,151
| 2.88961
| 0.168831
| 0.058427
| 0.040449
| 0.116854
| 0.408989
| 0.319101
| 0.319101
| 0.319101
| 0.319101
| 0.319101
| 0
| 0.043551
| 0.481321
| 1,151
| 45
| 49
| 25.577778
| 0.701843
| 0
| 0
| 0.216216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a8fdb2b5cc10e441111eda628478417245011ef
| 5,283
|
py
|
Python
|
official/cv/c3d/src/c3d_model.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77
|
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/c3d/src/c3d_model.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3
|
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/c3d/src/c3d_model.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24
|
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import math
import mindspore.nn as nn
import mindspore.ops as P
from mindspore.common import initializer as init
from src.utils import default_recurisive_init, KaimingNormal
class C3D(nn.Cell):
"""
C3D network definition.
Args:
num_classes (int): Number of classes. Default: 1000.
Returns:
Tensor, infer output tensor.
Examples:
>>> C3D(num_classes=1000)
"""
def __init__(self, num_classes=1000):
super(C3D, self).__init__()
self.conv1 = nn.Conv3d(in_channels=3, out_channels=64, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool1 = P.MaxPool3D(kernel_size=(1, 2, 2), strides=(1, 2, 2), pad_mode='same')
self.conv2 = nn.Conv3d(in_channels=64, out_channels=128, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool2 = P.MaxPool3D(kernel_size=(2, 2, 2), strides=(2, 2, 2), pad_mode='same')
self.conv3a = nn.Conv3d(in_channels=128, out_channels=256, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.conv3b = nn.Conv3d(in_channels=256, out_channels=256, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool3 = P.MaxPool3D(kernel_size=(2, 2, 2), strides=(2, 2, 2), pad_mode='same')
self.conv4a = nn.Conv3d(in_channels=256, out_channels=512, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.conv4b = nn.Conv3d(in_channels=512, out_channels=512, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool4 = P.MaxPool3D(kernel_size=(2, 2, 2), strides=(2, 2, 2), pad_mode='same')
self.conv5a = nn.Conv3d(in_channels=512, out_channels=512, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.conv5b = nn.Conv3d(in_channels=512, out_channels=512, kernel_size=(3, 3, 3),
padding=(1, 1, 1, 1, 1, 1), pad_mode='pad', has_bias=True)
self.pool5 = P.MaxPool3D(kernel_size=(2, 2, 2), strides=(2, 2, 2), pad_mode='same')
self.fc6 = nn.Dense(in_channels=8192, out_channels=4096)
self.fc7 = nn.Dense(in_channels=4096, out_channels=4096)
self.fc8 = nn.Dense(in_channels=4096, out_channels=num_classes, bias_init=init.Normal(0.02))
self.dropout = nn.Dropout(keep_prob=0.5)
self.relu = nn.ReLU()
self.pad = nn.Pad(paddings=((0, 0), (0, 0), (1, 0), (1, 0)), mode="CONSTANT")
self.__init_weight()
def __init_weight(self):
default_recurisive_init(self)
self.custom_init_weight()
def construct(self, x):
x = self.relu(self.conv1(x))
x = self.pool1(x)
x = self.relu(self.conv2(x))
x = self.pool2(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool3(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
x = self.pool4(x)
x = self.relu(self.conv5a(x))
x = self.relu(self.conv5b(x))
x = x.view(-1, 512 * 2, 7, 7)
x = self.pad(x)
x = x.view(-1, 512, 2, 8, 8)
x = self.pool5(x)
x = x.view(-1, 8192)
x = self.relu(self.fc6(x))
x = self.dropout(x)
x = self.relu(self.fc7(x))
x = self.dropout(x)
logits = self.fc8(x)
return logits
def custom_init_weight(self):
"""
Init the weight of Conv3d and Dense in the net.
"""
for _, cell in self.cells_and_names():
if isinstance(cell, nn.Conv3d):
cell.weight.set_data(init.initializer(
KaimingNormal(a=math.sqrt(5), mode='fan_out', nonlinearity='relu'),
cell.weight.shape, cell.weight.dtype))
if cell.bias is not None:
cell.bias.set_data(init.initializer(
'zeros', cell.bias.shape, cell.bias.dtype))
elif isinstance(cell, nn.Dense):
cell.weight.set_data(init.initializer(
init.Normal(0.01), cell.weight.shape, cell.weight.dtype))
if cell.bias is not None:
cell.bias.set_data(init.initializer(
'zeros', cell.bias.shape, cell.bias.dtype))
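A hedged smoke-test sketch for the C3D cell, assuming MindSpore on a backend with Conv3d/MaxPool3D support and the conventional C3D input of 16 RGB frames at 112x112; the class lives in official/cv/c3d/src/c3d_model.py in this repository.
import numpy as np
import mindspore as ms
# from src.c3d_model import C3D  # import path as laid out in this repo

net = C3D(num_classes=101)  # e.g. a UCF101-sized classifier head
clip = ms.Tensor(np.random.rand(1, 3, 16, 112, 112).astype(np.float32))
logits = net(clip)
print(logits.shape)  # expected: (1, 101)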
| 40.638462
| 100
| 0.570509
| 768
| 5,283
| 3.804688
| 0.223958
| 0.027379
| 0.032854
| 0.032854
| 0.458248
| 0.403149
| 0.375428
| 0.330938
| 0.330938
| 0.330938
| 0
| 0.07141
| 0.276358
| 5,283
| 129
| 101
| 40.953488
| 0.692911
| 0.161651
| 0
| 0.230769
| 0
| 0
| 0.01677
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.064103
| 0
| 0.141026
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a90c84a059304b0e838dbe80594658dfad7edd3
| 2,119
|
py
|
Python
|
blmath/geometry/apex.py
|
metabolize/blmath
|
8ea8d7be60349a60ffeb08a3e34fca20ef9eb0da
|
[
"BSD-2-Clause"
] | 6
|
2019-09-28T16:48:34.000Z
|
2022-03-25T17:05:46.000Z
|
blmath/geometry/apex.py
|
metabolize/blmath
|
8ea8d7be60349a60ffeb08a3e34fca20ef9eb0da
|
[
"BSD-2-Clause"
] | 6
|
2019-09-09T16:42:02.000Z
|
2021-06-25T15:25:50.000Z
|
blmath/geometry/apex.py
|
metabolize/blmath
|
8ea8d7be60349a60ffeb08a3e34fca20ef9eb0da
|
[
"BSD-2-Clause"
] | 4
|
2017-05-09T16:15:07.000Z
|
2019-02-15T14:15:30.000Z
|
import numpy as np
from blmath.numerics import vx
def apex(points, axis):
'''
Find the most extreme point in the direction of the axis provided.
axis: A vector, which is a 3x1 np.array.
'''
coords_on_axis = points.dot(axis)
return points[np.argmax(coords_on_axis)]
def inflection_points(points, axis, span):
'''
Find the list of vertices that precede inflection points in a curve. The curve is differentiated
with respect to the coordinate system defined by axis and span.
axis: A vector representing the vertical axis of the coordinate system.
span: A vector representing the horizontal axis of the coordinate system.
returns: a list of points in space corresponding to the vertices that
immediately preceed inflection points in the curve
'''
coords_on_span = points.dot(span)
dx = np.gradient(coords_on_span)
coords_on_axis = points.dot(axis)
# Take the second order finite difference of the curve with respect to the
# defined coordinate system
finite_difference_2 = np.gradient(np.gradient(coords_on_axis, dx), dx)
# Compare the product of all neighboring pairs of points in the second derivative
# If a pair of points has a negative product, then the second derivative changes sign
# at one of those points, signalling an inflection point
is_inflection_point = [finite_difference_2[i] * finite_difference_2[i + 1] <= 0 for i in range(len(finite_difference_2) - 1)]
inflection_point_indices = [i for i, b in enumerate(is_inflection_point) if b]
if len(inflection_point_indices) == 0: # pylint: disable=len-as-condition
return []
return points[inflection_point_indices]
def farthest(from_point, to_points):
'''
Find the point among to_points that is farthest from from_point.
Return a tuple: farthest_point, index_of_farthest_point.
'''
absolute_distances = vx.magnitude(to_points - from_point)
index_of_farthest_point = np.argmax(absolute_distances)
farthest_point = to_points[index_of_farthest_point]
return farthest_point, index_of_farthest_point
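# A minimal usage sketch for the helpers above, using assumed sample data; it relies on
# the same numpy and blmath imports as the module itself and is illustrative only.
sample_points = np.array([[0., 0., 0.], [1., 2., 0.], [2., 1., 0.], [3., 3., 0.]])
up = np.array([0., 1., 0.])
print(apex(sample_points, up))               # most extreme point along `up`: [3. 3. 0.]
print(farthest(np.zeros(3), sample_points))  # (farthest point, its index): ([3. 3. 0.], 3)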
| 36.534483
| 129
| 0.738084
| 315
| 2,119
| 4.796825
| 0.32381
| 0.068829
| 0.031767
| 0.052945
| 0.126406
| 0.07677
| 0
| 0
| 0
| 0
| 0
| 0.005886
| 0.198207
| 2,119
| 57
| 130
| 37.175439
| 0.883461
| 0.479
| 0
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.1
| 0
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a9150e1ffc2d578382971b5ac300e3f70157319
| 637
|
py
|
Python
|
examples/client/main.py
|
TheFarGG/Discode
|
facf6cd4f82baef2288a23dbe6f2a02dfc2407e2
|
[
"MIT"
] | 3
|
2021-11-06T11:07:18.000Z
|
2022-03-18T09:04:42.000Z
|
examples/client/main.py
|
UnrealFar/Discode
|
facf6cd4f82baef2288a23dbe6f2a02dfc2407e2
|
[
"MIT"
] | 3
|
2021-11-06T11:22:05.000Z
|
2022-03-12T16:36:52.000Z
|
examples/client/main.py
|
UnrealFar/Discode
|
facf6cd4f82baef2288a23dbe6f2a02dfc2407e2
|
[
"MIT"
] | 4
|
2021-11-06T11:08:26.000Z
|
2022-03-12T14:25:57.000Z
|
import os
import discode
TOKEN = os.environ.get("TOKEN")
# The token from the developer portal.
client = discode.Client(token=TOKEN, intents=discode.Intents.default())
@client.on_event("ready")
async def on_ready():
print(client.user, "is ready!")
# The ready listener gets fired when the bot/client is completely ready for use.
@client.on_event("message_create")
async def on_message(message: discode.Message):
msg: str = message.content
if msg.startswith("?hi"):
await message.channel.send("Hi!!!")
# The message_create listener is fired whenever a message is sent to any channel that the bot has access to.
| 23.592593
| 108
| 0.726845
| 96
| 637
| 4.760417
| 0.5
| 0.035011
| 0.056893
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161695
| 637
| 26
| 109
| 24.5
| 0.855805
| 0.348509
| 0
| 0
| 0
| 0
| 0.099757
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a93d3f57e61e9da5895ceeab547d073a015db76
| 468
|
py
|
Python
|
riccipy/metrics/bondi_2.py
|
cjayross/riccipy
|
2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846
|
[
"MIT"
] | 4
|
2019-08-17T04:28:06.000Z
|
2021-01-02T15:19:18.000Z
|
riccipy/metrics/bondi_2.py
|
grdbii/riccipy
|
2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846
|
[
"MIT"
] | 3
|
2019-08-02T04:07:43.000Z
|
2020-06-18T07:49:38.000Z
|
riccipy/metrics/bondi_2.py
|
grdbii/riccipy
|
2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846
|
[
"MIT"
] | null | null | null |
"""
Name: Bondi
References: Bondi, Proc. Roy. Soc. Lond. A, v282, p303, (1964)
Coordinates: Spherical
Symmetry: Spherical
Notes: Outgoing Coordinates
"""
from sympy import Function, diag, sin, symbols
coords = symbols("r v theta phi", real=True)
variables = ()
functions = symbols("C M", cls=Function)
r, v, th, ph = coords
C, M = functions
metric = diag(0, -C(r, v) ** 2 * (1 - 2 * M(r, v) / r), r ** 2, r ** 2 * sin(th) ** 2)
metric[0, 1] = metric[1, 0] = -C(r, v)
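# A minimal sketch of inspecting the metric defined above with sympy; the calls below are
# illustrative additions, not part of the metric definition.
from sympy import pprint, simplify
pprint(metric)                   # full 4x4 component matrix in outgoing coordinates
pprint(simplify(metric.det()))   # determinant, expected to reduce to -C(r, v)**2 * r**4 * sin(theta)**2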
| 27.529412
| 86
| 0.621795
| 78
| 468
| 3.730769
| 0.525641
| 0.034364
| 0.020619
| 0.027491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055263
| 0.188034
| 468
| 16
| 87
| 29.25
| 0.710526
| 0.309829
| 0
| 0
| 0
| 0
| 0.050794
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a950c28a9d44906d9a72986af5603b4ab55c885
| 1,583
|
py
|
Python
|
setup.py
|
bcongdon/instapaper-to-sqlite
|
378b87ffcd2832aeff735dd78a0c8206d220b899
|
[
"MIT"
] | 1
|
2021-10-04T05:48:51.000Z
|
2021-10-04T05:48:51.000Z
|
setup.py
|
bcongdon/instapaper-to-sqlite
|
378b87ffcd2832aeff735dd78a0c8206d220b899
|
[
"MIT"
] | null | null | null |
setup.py
|
bcongdon/instapaper-to-sqlite
|
378b87ffcd2832aeff735dd78a0c8206d220b899
|
[
"MIT"
] | 1
|
2022-02-26T14:12:13.000Z
|
2022-02-26T14:12:13.000Z
|
import os
from setuptools import setup
VERSION = "0.2"
def get_long_description():
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"),
encoding="utf8",
) as fp:
return fp.read()
setup(
name="instapaper-to-sqlite",
description="Save data from Instapaper to a SQLite database",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Benjamin Congdon",
author_email="[email protected]",
url="https://github.com/bcongdon/instapaper-to-sqlite",
project_urls={
"Source": "https://github.com/bcongdon/instapaper-to-sqlite",
"Issues": "https://github.com/bcongdon/instapaper-to-sqlite/issues",
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Database",
],
keywords="instapaper sqlite export dogsheep",
version=VERSION,
packages=["instapaper_to_sqlite"],
entry_points="""
[console_scripts]
instapaper-to-sqlite=instapaper_to_sqlite.cli:cli
""",
install_requires=[
"click",
"requests",
"sqlite-utils~=3.17",
"pyinstapaper @ git+https://github.com/bcongdon/pyinstapaper#egg=pyinstapaper",
],
extras_require={"test": ["pytest"]},
tests_require=["instapaper-to-sqlite[test]"],
)
| 29.867925
| 87
| 0.632975
| 173
| 1,583
| 5.65896
| 0.537572
| 0.110317
| 0.147089
| 0.089888
| 0.134831
| 0.134831
| 0.134831
| 0.093973
| 0
| 0
| 0
| 0.011272
| 0.215414
| 1,583
| 52
| 88
| 30.442308
| 0.776973
| 0
| 0
| 0.043478
| 0
| 0
| 0.504106
| 0.047378
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021739
| false
| 0
| 0.043478
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a964781b5354d9a284fd5961c977de9c81e555d
| 17,664
|
py
|
Python
|
picket/rvae/train_eval_models.py
|
rekords-uw/Picket
|
773797ae1c1ed37c345facfb43e289a75d92cc1c
|
[
"MIT"
] | 10
|
2020-11-24T17:26:01.000Z
|
2021-09-26T18:41:44.000Z
|
picket/rvae/train_eval_models.py
|
rekords-uw/Picket
|
773797ae1c1ed37c345facfb43e289a75d92cc1c
|
[
"MIT"
] | null | null | null |
picket/rvae/train_eval_models.py
|
rekords-uw/Picket
|
773797ae1c1ed37c345facfb43e289a75d92cc1c
|
[
"MIT"
] | 3
|
2021-05-26T12:45:37.000Z
|
2021-11-22T04:51:40.000Z
|
#!/usr/bin/env python3
import torch
from torch import optim
import torch.nn.functional as F
import argparse
from sklearn.metrics import mean_squared_error
import numpy as np
import json
from . import utils
from .model_utils import get_pi_exact_vec, rnn_vae_forward_one_stage, rnn_vae_forward_two_stage
def training_phase(model, optimizer, train_loader, args, epoch, mute=True):
model.train()
train_loss_vae, train_nll_vae, train_z_kld_vae, train_w_kld_vae = 4*[0]
train_loss_seq, train_nll_seq, train_z_kld_seq, train_w_kld_seq = 4*[0]
train_total_loss_seq_vae, train_loss_seq_vae, train_nll_seq_vae, train_z_kld_seq_vae, train_w_kld_seq_vae = 5*[0]
for batch_idx, unpack in enumerate(train_loader):
data_input = unpack[0]
if args.cuda_on:
data_input = data_input.cuda()
optimizer.zero_grad()
## first foward-pass
p_params, q_params, q_samples = model(data_input, n_epoch=epoch-1)
if not args.AVI:
get_pi_exact_vec(model, data_input, p_params, q_params, args, logit_ret=True) # get pi, saves to q_params (with no_grad)
vae_loss, vae_nll, vae_z_kld, vae_w_kld = model.loss_function(data_input, p_params, q_params, q_samples)
train_loss_vae += vae_loss.item()
train_nll_vae += vae_nll.item()
train_z_kld_vae += vae_z_kld.item()
train_w_kld_vae += vae_w_kld.item()
if args.inference_type == 'vae':
vae_loss.backward()
elif args.inference_type == 'seqvae':
if args.seqvae_bprop: # NOTE: rolls out iterations through time and bprops
params_in = (p_params, q_params, q_samples)
seq_loss_pack, _, _ = rnn_vae_forward_one_stage(params_in, data_input, model, vae_loss, args,
number_steps=args.seqvae_steps, loss_per_iter=True, epoch_id=epoch)
seq_total_loss, seq_final_loss, seq_final_nll, seq_final_z_kld, seq_final_w_kld = seq_loss_pack
train_total_loss_seq_vae += seq_total_loss.item()
train_loss_seq_vae += seq_final_loss.item()
train_nll_seq_vae += seq_final_nll.item()
train_z_kld_seq_vae += seq_final_z_kld.item()
train_w_kld_seq_vae += seq_final_w_kld.item()
else:
vae_loss.backward()
train_total_loss_seq_vae += vae_loss.item()
train_loss_seq_vae += vae_loss.item()
train_nll_seq_vae += vae_nll.item()
train_z_kld_seq_vae += vae_z_kld.item()
train_w_kld_seq_vae += vae_w_kld.item()
seq_total_loss = torch.tensor(0.0)
seq_final_loss = torch.tensor(0.0)
seq_final_nll = torch.tensor(0.0)
seq_final_z_kld = torch.tensor(0.0)
seq_final_w_kld = torch.tensor(0.0)
optimizer.step()
if batch_idx % args.log_interval == 0 and not mute:
print('\n\nTrain Epoch: {} [{}/{} ({:.0f}%)]\tVAE Loss: {:.3f}\tVAE NLL: {:.3f}\tVAE KLD_Z: {:.3f}\tVAE KLD_W: {:.3f}'.format(
epoch, batch_idx * len(data_input), len(train_loader.dataset),
100. * batch_idx / len(train_loader),
vae_loss.item()/len(data_input), vae_nll.item()/len(data_input),
vae_z_kld.item()/len(data_input), vae_w_kld.item()/len(data_input)))
if args.inference_type == 'seqvae':
print('\n')
print('\n\nAdditional Info:\tTotal Seq Loss: {:.3f}\tFinal Seq Loss: {:.3f}\tFinal Sep NLL: {:.3f}\tFinal Sep KLD_Z: {:.3f}\tFinal Sep KLD_W: {:.3f}\n'.format(
seq_total_loss.item()/len(data_input), seq_final_loss.item()/len(data_input),
seq_final_nll.item()/len(data_input), seq_final_z_kld.item()/len(data_input),
seq_final_w_kld.item()/len(data_input)))
dataset_len = float(len(train_loader.dataset))
ret = {'train_loss_vae': train_loss_vae/dataset_len, 'train_nll_vae': train_nll_vae/dataset_len,
'train_z_kld_vae': train_z_kld_vae/dataset_len, 'train_w_kld_vae': train_w_kld_vae/dataset_len}
if args.inference_type == "seqvae":
ret_seq = {'train_loss_seq': train_loss_seq_vae/dataset_len, 'train_nll_seq': train_nll_seq_vae/dataset_len,
'train_z_kld_seq': train_z_kld_seq_vae/dataset_len,'train_w_kld_seq': train_w_kld_seq_vae/dataset_len,
'train_total_loss_seq':train_total_loss_seq_vae/dataset_len}
ret = {**ret, **ret_seq}
return ret
def evaluation_phase(model, data_eval, dataset_obj, args, epoch,
clean_comp_show=False, data_eval_clean=False, logit_pi_prev=torch.tensor([]), w_conv=False, mask_err=None):
# if args.cuda_on:
# model.cpu()
if mask_err is not None:
mask_err = mask_err.bool()
model.eval()
p_params, q_params, q_samples = model(data_eval)
if not args.AVI:
get_pi_exact_vec(model, data_eval, p_params, q_params, args, logit_ret=True) # get pi
vae_loss, vae_nll, vae_z_kld, vae_w_kld = model.loss_function(data_eval, p_params,
q_params, q_samples)
eval_data_len = data_eval.shape[0]
losses = {'eval_loss_vae': vae_loss.item()/eval_data_len, 'eval_nll_vae':vae_nll.item()/eval_data_len,
'eval_z_kld_vae': vae_z_kld.item()/eval_data_len, 'eval_w_kld_vae':vae_w_kld.item()/eval_data_len}
# SEQ-VAE
if args.inference_type == 'seqvae':
#with torch.no_grad():
params_in = (p_params, q_params, q_samples)
if args.seqvae_two_stage:
seq_loss_pack, _, seq_param_pack = rnn_vae_forward_two_stage(params_in, data_eval, model, vae_loss, args,
number_steps=args.seqvae_steps, number_steps_second_stage=args.steps_2stage,
loss_per_iter=True, mask_err=mask_err, epoch_id=epoch)
else:
seq_loss_pack, _, seq_param_pack = rnn_vae_forward_one_stage(params_in, data_eval, model, vae_loss, args,
number_steps=args.seqvae_steps, loss_per_iter=True, mask_err=mask_err, epoch_id=epoch)
seq_total_loss, seq_final_loss, seq_final_nll, seq_final_z_kld, seq_final_w_kld = seq_loss_pack
p_params_final, q_params_final, q_samples_final = seq_param_pack
losses_seq_vae = {'eval_loss_seq': seq_final_loss.item()/eval_data_len, 'eval_nll_seq': seq_final_nll.item()/eval_data_len,
'eval_z_kld_seq': seq_final_z_kld.item()/eval_data_len, 'eval_w_kld_seq': seq_final_w_kld.item()/eval_data_len,
'eval_total_loss_seq': seq_total_loss.item()/eval_data_len}
losses = {**losses, **losses_seq_vae}
if args.inference_type == 'seqvae':
p_params_metric, q_params_metric, q_samples_metric = p_params_final, q_params_final, q_samples_final
else:
p_params_metric, q_params_metric, q_samples_metric = p_params, q_params, q_samples
# Get outlier scores and the clean component if needed
with torch.no_grad():
if args.outlier_model == "VAE": # VAE models only (no w's or pi's)
# generative model only p(x|z, ...)
nll_score_mat = utils.generate_score_outlier_matrix(p_params_metric, data_eval, dataset_obj)
pi_score_mat = -10
converg_norm_w = -10
else:
if clean_comp_show:
loss_clean, nll_clean, z_kld_clean, w_kld_clean = model.loss_function(data_eval, p_params_metric,
q_params_metric, q_samples_metric,
clean_comp_only=True,
data_eval_clean=data_eval_clean)
losses_add = {'eval_loss_final_clean': loss_clean.item()/eval_data_len,
'eval_nll_final_clean': nll_clean.item()/eval_data_len,
'eval_z_kld_final_clean': z_kld_clean.item()/eval_data_len,
'eval_w_kld_final_clean': w_kld_clean.item()/eval_data_len
}
losses = {**losses, **losses_add}
# q(w|x, ...) param (pi), used in outlier score
pi_score_mat = torch.sigmoid(q_params_metric['w']['logit_pi']).clamp(1e-6, 1-1e-6)
# -log p(x|z, ...) used as outlier score
nll_score_mat = utils.generate_score_outlier_matrix(p_params_metric, data_eval, dataset_obj)
# check convergence of weights (pi's)
if w_conv:
if logit_pi_prev.nelement() == 0:
logit_pi_prev = torch.zeros_like(q_params_metric['w']['logit_pi'])
converg_norm_w = (q_params_metric['w']['logit_pi'] - logit_pi_prev).norm().item()
logit_pi_prev = q_params_metric['w']['logit_pi'].clone().detach()
else:
converg_norm_w = -10
# measure calibration of the pi's using MSE and cross-entropy
if isinstance(mask_err, torch.Tensor):
pi_mtx = pi_score_mat
pi_mtx_true = (~mask_err).float()
err_pi = ((pi_mtx - pi_mtx_true)**2).mean()
ce_pi = F.binary_cross_entropy(pi_mtx, pi_mtx_true)
print('MSE on pi pred: {}'.format(err_pi))
print('CE on pi pred: {}'.format(ce_pi))
print('dirt pi median: {} std: {}'.format(torch.sigmoid(q_params_metric['w']['logit_pi'][mask_err]).median(), torch.sigmoid(q_params_metric['w']['logit_pi'][mask_err]).std()))
print('clean pi median: {} std: {}'.format(torch.sigmoid(q_params_metric['w']['logit_pi'][~mask_err]).median(), torch.sigmoid(q_params_metric['w']['logit_pi'][~mask_err]).std()))
metrics = {'nll_score': nll_score_mat, 'pi_score': pi_score_mat, 'converg_norm_w': converg_norm_w}
return losses, metrics
def repair_phase(model, data_dirty, data_clean, dataset_obj, args, mask, mode, epoch):
model.eval()
# model params with input: dirty data
if args.inference_type == 'seqvae':
p_params_xd, q_params_xd, q_samples_xd = model(data_dirty)
if not args.AVI:
get_pi_exact_vec(model, data_dirty, p_params_xd, q_params_xd, args, logit_ret=True)
params_xd_in = (p_params_xd, q_params_xd, q_samples_xd)
if args.seqvae_two_stage:
_, _, (p_params_xd, q_params_xd, q_samples_xd) = rnn_vae_forward_two_stage(params_xd_in, data_dirty, model,
torch.tensor(0.0, device=data_dirty.device),
args, number_steps=args.seqvae_steps,
number_steps_second_stage=args.steps_2stage,
loss_per_iter=True, epoch_id=epoch)
else:
_, _, (p_params_xd, q_params_xd, q_samples_xd) = rnn_vae_forward_one_stage(params_xd_in, data_dirty, model,
torch.tensor(0.0, device=data_dirty.device),
args, number_steps=args.seqvae_steps, loss_per_iter=True, epoch_id=epoch)
else: # standard 'vae' type inference
p_params_xd, q_params_xd, q_samples_xd = model(data_dirty)
if not args.AVI:
get_pi_exact_vec(model, data_dirty, p_params_xd, q_params_xd, args, logit_ret=True) # get pi
# model params with input: underlying clean data
if args.inference_type == 'seqvae':
p_params_xc, q_params_xc, q_samples_xc = model(data_clean)
if not args.AVI:
get_pi_exact_vec(model, data_dirty, p_params_xc, q_params_xc, args, logit_ret=True)
params_xc_in = (p_params_xc, q_params_xc, q_samples_xc)
if args.seqvae_two_stage:
_, _, (p_params_xc, q_params_xc, q_samples_xc) = rnn_vae_forward_two_stage(params_xc_in, data_clean, model,
torch.tensor(0.0, device=data_clean.device),
args, number_steps=args.seqvae_steps,
number_steps_second_stage=args.steps_2stage,
loss_per_iter=True, epoch_id=epoch)
else:
_, _, (p_params_xc, q_params_xc, q_samples_xc) = rnn_vae_forward_one_stage(params_xc_in, data_clean, model,
torch.tensor(0.0, device=data_clean.device),
args, number_steps=args.seqvae_steps, loss_per_iter=True, epoch_id=epoch)
else: # 'vae' type inference
p_params_xc, q_params_xc, q_samples_xc = model(data_clean)
# no need to get pi, not used after
# error (MSE) lower bound, on dirty cell positions only
error_lb_dc, error_lb_dc_per_feat = utils.error_computation(model, data_clean, p_params_xc['x'], mask) # x_truth - f_vae(x_clean)
# error repair, on dirty cell positions only
error_repair_dc, error_repair_dc_per_feat = utils.error_computation(model, data_clean, p_params_xd['x'], mask) # x_truth - f_vae(x_dirty)
print("\n\n {} REPAIR ERROR (DIRTY POS):{}".format(mode, error_repair_dc))
# error upper bound, on dirty cell positions only
error_up_dc, error_up_dc_per_feat = utils.error_computation(model, data_clean, data_dirty, mask, x_input_size=True) # x_truth - x_dirty
# error on clean cell positions only (to test the impact of dirty cells on clean cells under the model)
error_repair_cc, error_repair_cc_per_feat = utils.error_computation(model, data_clean, p_params_xd['x'], 1-mask)
print("\n\n {} REPAIR ERROR (CLEAN POS):{}".format(mode, error_repair_cc))
# Get NLL (predict. posterior approx) under dirty data
dict_slice = lambda dict_op, row_pos: {key:(value[row_pos,:] \
if value.shape[0]==data_dirty.shape[0] else value) for key, value in dict_op.items()}
dirty_row_pos = mask.any(dim=1).bool()
n_dirty_rows = dirty_row_pos.sum().item()
p_params_xd_sliced = dict_slice(p_params_xd, dirty_row_pos)
q_params_xd_sliced = dict()
if args.outlier_model == 'RVAE':
q_params_xd_sliced['w'] = dict_slice(q_params_xd['w'], dirty_row_pos)
q_params_xd_sliced['z'] = dict_slice(q_params_xd['z'], dirty_row_pos)
q_samples_xd_sliced = dict_slice(q_samples_xd, dirty_row_pos)
vae_loss_dc, vae_nll_dc, vae_z_kld_dc, vae_w_kld_dc = model.loss_function(data_clean[dirty_row_pos,:], p_params_xd_sliced,
q_params_xd_sliced, q_samples_xd_sliced,
clean_comp_only=True,
data_eval_clean=True)
clean_row_pos = ~dirty_row_pos
n_clean_rows = clean_row_pos.sum().item()
p_params_xd_sliced = dict_slice(p_params_xd, clean_row_pos)
q_params_xd_sliced = dict()
if args.outlier_model == 'RVAE':
q_params_xd_sliced['w'] = dict_slice(q_params_xd['w'], clean_row_pos)
q_params_xd_sliced['z'] = dict_slice(q_params_xd['z'], clean_row_pos)
q_samples_xd_sliced = dict_slice(q_samples_xd, clean_row_pos)
vae_loss_cc, vae_nll_cc, vae_z_kld_cc, vae_w_kld_cc = model.loss_function(data_clean[clean_row_pos,:], p_params_xd_sliced,
q_params_xd_sliced, q_samples_xd_sliced,
clean_comp_only=True,
data_eval_clean=True)
eval_data_len = data_dirty.shape[0]
losses = {'eval_loss_final_clean_dc': vae_loss_dc.item()/n_dirty_rows, 'eval_nll_final_clean_dc':vae_nll_dc.item()/n_dirty_rows,
'eval_z_kld_final_clean_dc': vae_z_kld_dc.item()/n_dirty_rows, 'eval_w_kld_final_clean_dc':vae_w_kld_dc.item()/n_dirty_rows,
'eval_loss_final_clean_cc': vae_loss_cc.item()/n_clean_rows, 'eval_nll_final_clean_cc':vae_nll_cc.item()/n_clean_rows,
'eval_z_kld_final_clean_cc': vae_z_kld_cc.item()/n_clean_rows, 'eval_w_kld_final_clean_cc':vae_w_kld_cc.item()/n_clean_rows,
'eval_loss_final_clean_all': (vae_loss_cc+vae_loss_dc).item()/eval_data_len, 'eval_nll_final_clean_all':(vae_nll_cc+vae_nll_dc).item()/eval_data_len,
'eval_z_kld_final_clean_all': (vae_z_kld_cc+vae_z_kld_dc).item()/eval_data_len, 'eval_w_kld_final_clean_all':(vae_w_kld_cc+vae_w_kld_dc).item()/eval_data_len,
'mse_lower_bd_dirtycells': error_lb_dc.item(), 'mse_upper_bd_dirtycells': error_up_dc.item() , 'mse_repair_dirtycells': error_repair_dc.item(),
'mse_repair_cleancells': error_repair_cc.item(),
'errors_per_feature': [error_lb_dc_per_feat, error_repair_dc_per_feat, error_up_dc_per_feat, error_repair_cc_per_feat]}
return losses
| 52.260355
| 194
| 0.614753
| 2,526
| 17,664
| 3.828187
| 0.098575
| 0.034747
| 0.021613
| 0.02637
| 0.667011
| 0.565977
| 0.482834
| 0.402482
| 0.360703
| 0.30848
| 0
| 0.005001
| 0.286798
| 17,664
| 337
| 195
| 52.41543
| 0.762581
| 0.059726
| 0
| 0.28125
| 0
| 0.008929
| 0.080116
| 0.027027
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013393
| false
| 0
| 0.040179
| 0
| 0.066964
| 0.040179
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a96c59de05ef2cf939a78138027073d3aeef532
| 489
|
py
|
Python
|
sztuczna_inteligencja/3-lab/backtrackingSolve.py
|
Magikis/Uniwersity
|
06964ef31d721af85740df1dce3f966006ab9f78
|
[
"MIT"
] | 12
|
2017-11-30T08:45:48.000Z
|
2018-04-26T14:15:45.000Z
|
sztuczna_inteligencja/3-lab/backtrackingSolve.py
|
Magikis/Uniwersity
|
06964ef31d721af85740df1dce3f966006ab9f78
|
[
"MIT"
] | null | null | null |
sztuczna_inteligencja/3-lab/backtrackingSolve.py
|
Magikis/Uniwersity
|
06964ef31d721af85740df1dce3f966006ab9f78
|
[
"MIT"
] | 9
|
2017-10-16T09:42:59.000Z
|
2018-01-27T19:48:45.000Z
|
# import cProfile
# import pstats
# import io
from picture import *
# pr = cProfile.Profile()
# pr.enable()
def out(p):
for i in range(2):
print([len(x) for x in p.perms[i]])
if __name__ == '__main__':
p = Picture()
p.genPerms()
p.detuctAll()
p.backtrackLoop()
p.saveOtput()
# pr.disable()
# s = io.StringIO()
# sortby = 'cumulative'
# ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
# ps.print_stats()
# print(s.getvalue())
| 18.111111
| 56
| 0.586912
| 66
| 489
| 4.19697
| 0.575758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002717
| 0.247444
| 489
| 26
| 57
| 18.807692
| 0.75
| 0.441718
| 0
| 0
| 0
| 0
| 0.030651
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a96d8eb608d332f737e6f0d0e18267bfd899873
| 1,512
|
py
|
Python
|
benchmark/generate_examples_strprose.py
|
HALOCORE/SynGuar
|
8f7f9ba52e83091ad3def501169fd60d20b28321
|
[
"MIT"
] | 1
|
2021-06-23T05:10:36.000Z
|
2021-06-23T05:10:36.000Z
|
benchmark/generate_examples_strprose.py
|
HALOCORE/SynGuar
|
8f7f9ba52e83091ad3def501169fd60d20b28321
|
[
"MIT"
] | null | null | null |
benchmark/generate_examples_strprose.py
|
HALOCORE/SynGuar
|
8f7f9ba52e83091ad3def501169fd60d20b28321
|
[
"MIT"
] | null | null | null |
# imports
import os
import json
import subprocess
abs_join = lambda p1, p2 : os.path.abspath(os.path.join(p1, p2))
# constants
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SEED_RELPATH = "./strprose/example_files/_seeds.json"
SEED_FULLPATH = abs_join(SCRIPT_DIR, SEED_RELPATH)
SEED_INFO = None
with open(SEED_FULLPATH, 'r') as f:
SEED_INFO = json.load(f)
TOOL_RELPATH = "../StrPROSE-synthesizer/StrPROSE/bin/Debug/netcoreapp3.1/StrPROSE.dll"
TOOL_FULLPATH = abs_join(SCRIPT_DIR, TOOL_RELPATH)
TARGET_RELDIR = "./strprose/targets"
TARGET_FULLDIR = abs_join(SCRIPT_DIR, TARGET_RELDIR)
MAX_SAMPLE_SIZE = 2000
EXAMPLE_RELDIR = "./strprose/example_files"
EXAMPLE_FULLDIR = abs_join(SCRIPT_DIR, EXAMPLE_RELDIR)
TIME_OUT = 120
# methods
def generate_examples(bench_id, seed):
command_line_args = [
"dotnet",
TOOL_FULLPATH,
"--samplegen",
TARGET_FULLDIR,
str(bench_id),
str(seed),
str(MAX_SAMPLE_SIZE),
EXAMPLE_FULLDIR
]
try:
print(f"# -------- Start Process ({bench_id}, {seed}) --------")
done_result = subprocess.run(command_line_args, timeout=TIME_OUT)
print(f"# ^^^^^^^^ Done: {done_result.returncode} ({bench_id}, {seed}) ^^^^^^^^")
except subprocess.TimeoutExpired:
print('# Error: subprocess TIMEOUT !!!')
if __name__ == "__main__":
for bench_id in SEED_INFO["bench_seeds"]:
for seed in SEED_INFO["bench_seeds"][bench_id]:
generate_examples(bench_id, seed)
| 32.170213
| 89
| 0.683862
| 197
| 1,512
| 4.903553
| 0.401015
| 0.050725
| 0.05383
| 0.066253
| 0.233954
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010442
| 0.176587
| 1,512
| 47
| 90
| 32.170213
| 0.765462
| 0.016534
| 0
| 0
| 0
| 0
| 0.236523
| 0.1031
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.076923
| 0
| 0.102564
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a96e21fb56076c17506b44887fb9f2f8344e7b0
| 558
|
py
|
Python
|
elliesite/context_processors.py
|
davidkartuzinski/ellieplatformsite
|
63a41cb2a15ae81a7cd3cdf68d783398b3205ce2
|
[
"MIT"
] | 1
|
2021-06-26T22:18:31.000Z
|
2021-06-26T22:18:31.000Z
|
ellie/context_processors.py
|
open-apprentice/ellieplatform-website
|
3018feb05a2a44b916afba3e8e2eb71c18147117
|
[
"MIT"
] | 12
|
2021-06-26T22:38:45.000Z
|
2021-07-07T15:49:43.000Z
|
elliesite/context_processors.py
|
davidkartuzinski/ellieplatformsite
|
63a41cb2a15ae81a7cd3cdf68d783398b3205ce2
|
[
"MIT"
] | 1
|
2021-07-07T15:33:43.000Z
|
2021-07-07T15:33:43.000Z
|
import sys
from django.urls import resolve
def global_vars(request):
return {
'GLOBAL_TWITTER_ACCOUNT': '@open_apprentice',
'ORGANIZATION_NAME': 'Open Apprentice Foundation',
'ORGANIZATION_WEBSITE': 'https://openapprentice.org',
'ORGANIZATION_LOGO': '/static/img/ellie/open-apprentice-logo-full.png', # relative URL with pre /,
'SITE_LOGO_URL': '/static/img/ellie/ellie-platform-logo.png', # relative URL with pre /
'APPNAME': sys.modules[resolve(request.path_info).func.__module__].__package__,
}
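# A minimal sketch of how this context processor would be registered in the project's
# Django settings; the settings location and the other entries are assumptions for illustration.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "elliesite.context_processors.global_vars",  # exposes the globals defined above
            ],
        },
    },
]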
| 39.857143
| 107
| 0.693548
| 65
| 558
| 5.676923
| 0.630769
| 0.113821
| 0.075881
| 0.097561
| 0.113821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173835
| 558
| 13
| 108
| 42.923077
| 0.800434
| 0.086022
| 0
| 0
| 0
| 0
| 0.497041
| 0.216963
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0.090909
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a96e48e2da874873ab9a54b25f7428bb39c7d94
| 18,180
|
py
|
Python
|
tools/train_net_step.py
|
va1shn9v/Detectron.pytorch
|
3e1cb11f160148248cbbd79e3dd9f490ca9c280a
|
[
"MIT"
] | null | null | null |
tools/train_net_step.py
|
va1shn9v/Detectron.pytorch
|
3e1cb11f160148248cbbd79e3dd9f490ca9c280a
|
[
"MIT"
] | null | null | null |
tools/train_net_step.py
|
va1shn9v/Detectron.pytorch
|
3e1cb11f160148248cbbd79e3dd9f490ca9c280a
|
[
"MIT"
] | null | null | null |
""" Training script for steps_with_decay policy"""
import argparse
import os
import sys
import pickle
import resource
import traceback
import logging
from collections import defaultdict
import numpy as np
import yaml
import torch
from torch.autograd import Variable
import torch.nn as nn
import cv2
cv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in dataloader
import _init_paths # pylint: disable=unused-import
import nn as mynn
import utils.net as net_utils
import utils.misc as misc_utils
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
from datasets.roidb import combined_roidb_for_training
from roi_data.loader import RoiDataLoader, MinibatchSampler, BatchSampler, collate_minibatch
from modeling.model_builder import Generalized_RCNN
from utils.detectron_weight_helper import load_detectron_weight
from utils.logging import setup_logging
from utils.timer import Timer
from utils.training_stats import TrainingStats
# Set up logging and load config options
logger = setup_logging(__name__)
logging.getLogger('roi_data.loader').setLevel(logging.INFO)
# RuntimeError: received 0 items of ancdata. Issue: pytorch/pytorch#973
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
def parse_args():
"""Parse input arguments"""
parser = argparse.ArgumentParser(description='Train a X-RCNN network')
parser.add_argument(
'--dataset', dest='dataset', required=True,
help='Dataset to use')
parser.add_argument(
'--num_classes', dest='num_classes',
help='Number of classes in your custom dataset',
default=None, type=int)
parser.add_argument(
'--cfg', dest='cfg_file', required=True,
help='Config file for training (and optionally testing)')
parser.add_argument(
'--set', dest='set_cfgs',
help='Set config keys. Key value sequence separated by whitespace, '
'e.g. [key] [value] [key] [value]',
default=[], nargs='+')
parser.add_argument(
'--disp_interval',
help='Display training info every N iterations',
default=20, type=int)
parser.add_argument(
'--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false')
# Optimization
# These options have the highest priority and can overwrite the values in the config file
# or values set by set_cfgs. `None` means do not overwrite.
parser.add_argument(
'--bs', dest='batch_size',
help='Explicitly specify to overwrite the value that comes from cfg_file.',
type=int)
parser.add_argument(
'--nw', dest='num_workers',
help='Explicitly specify to overwrite the number of workers used to load data. Defaults to 4',
type=int)
parser.add_argument(
'--iter_size',
help='Update once every iter_size steps, as in Caffe.',
default=1, type=int)
parser.add_argument(
'--o', dest='optimizer', help='Training optimizer.',
default=None)
parser.add_argument(
'--lr', help='Base learning rate.',
default=None, type=float)
parser.add_argument(
'--lr_decay_gamma',
help='Learning rate decay rate.',
default=None, type=float)
# Epoch
parser.add_argument(
'--start_step',
help='Starting step count for training epoch. 0-indexed.',
default=0, type=int)
# Resume training: requires same iterations per epoch
parser.add_argument(
'--resume',
help='resume to training on a checkpoint',
action='store_true')
parser.add_argument(
'--no_save', help='do not save anything', action='store_true')
parser.add_argument(
'--load_ckpt', help='checkpoint path to load')
parser.add_argument(
'--load_detectron', help='path to the detectron weight pickle file')
parser.add_argument(
'--use_tfboard', help='Use tensorflow tensorboard to log training info',
action='store_true')
return parser.parse_args()
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
"""Save checkpoint"""
if args.no_save:
return
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
if isinstance(model, mynn.DataParallel):
model = model.module
model_state_dict = model.state_dict()
torch.save({
'step': step,
'train_size': train_size,
'batch_size': args.batch_size,
'model': model.state_dict(),
'optimizer': optimizer.state_dict()}, save_name)
logger.info('save model: %s', save_name)
def main():
"""Main function"""
args = parse_args()
print('Called with args:')
print(args)
if not torch.cuda.is_available():
sys.exit("Need a CUDA device to run the code.")
if args.cuda or cfg.NUM_GPUS > 0:
cfg.CUDA = True
else:
raise ValueError("Need Cuda device to run !")
if args.dataset == "custom_dataset" and args.num_classes is None:
raise ValueError("Need number of classes in your custom dataset to run!")
if args.dataset == "coco2017":
cfg.TRAIN.DATASETS = ('coco_2014_train',)
cfg.MODEL.NUM_CLASSES = 4
elif args.dataset == "keypoints_coco2017":
cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',)
cfg.MODEL.NUM_CLASSES = 2
elif args.dataset == "voc2007":
cfg.TRAIN.DATASETS = ('voc_2007_train',)
cfg.MODEL.NUM_CLASSES = 21
elif args.dataset == "voc2012":
cfg.TRAIN.DATASETS = ('voc_2012_train',)
cfg.MODEL.NUM_CLASSES = 21
elif args.dataset == "custom_dataset":
cfg.TRAIN.DATASETS = ('custom_data_train',)
cfg.MODEL.NUM_CLASSES = args.num_classes
else:
raise ValueError("Unexpected args.dataset: {}".format(args.dataset))
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
### Adaptively adjust some configs ###
original_batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH
original_ims_per_batch = cfg.TRAIN.IMS_PER_BATCH
original_num_gpus = cfg.NUM_GPUS
if args.batch_size is None:
args.batch_size = original_batch_size
cfg.NUM_GPUS = torch.cuda.device_count()
assert (args.batch_size % cfg.NUM_GPUS) == 0, \
'batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS)
cfg.TRAIN.IMS_PER_BATCH = args.batch_size // cfg.NUM_GPUS
effective_batch_size = args.iter_size * args.batch_size
print('effective_batch_size = batch_size * iter_size = %d * %d' % (args.batch_size, args.iter_size))
print('Adaptive config changes:')
print(' effective_batch_size: %d --> %d' % (original_batch_size, effective_batch_size))
print(' NUM_GPUS: %d --> %d' % (original_num_gpus, cfg.NUM_GPUS))
print(' IMS_PER_BATCH: %d --> %d' % (original_ims_per_batch, cfg.TRAIN.IMS_PER_BATCH))
### Adjust learning based on batch size change linearly
# For iter_size > 1, gradients are `accumulated`, so lr is scaled based
# on batch_size instead of effective_batch_size
old_base_lr = cfg.SOLVER.BASE_LR
cfg.SOLVER.BASE_LR *= args.batch_size / original_batch_size
print('Adjust BASE_LR linearly according to batch_size change:\n'
' BASE_LR: {} --> {}'.format(old_base_lr, cfg.SOLVER.BASE_LR))
### Adjust solver steps
step_scale = original_batch_size / effective_batch_size
old_solver_steps = cfg.SOLVER.STEPS
old_max_iter = cfg.SOLVER.MAX_ITER
cfg.SOLVER.STEPS = list(map(lambda x: int(x * step_scale + 0.5), cfg.SOLVER.STEPS))
cfg.SOLVER.MAX_ITER = int(cfg.SOLVER.MAX_ITER * step_scale + 0.5)
print('Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n'
' SOLVER.STEPS: {} --> {}\n'
' SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps, cfg.SOLVER.STEPS,
old_max_iter, cfg.SOLVER.MAX_ITER))
# Scale FPN rpn_proposals collect size (post_nms_topN) in `collect` function
# of `collect_and_distribute_fpn_rpn_proposals.py`
#
# post_nms_topN = int(cfg[cfg_key].RPN_POST_NMS_TOP_N * cfg.FPN.RPN_COLLECT_SCALE + 0.5)
if cfg.FPN.FPN_ON and cfg.MODEL.FASTER_RCNN:
cfg.FPN.RPN_COLLECT_SCALE = cfg.TRAIN.IMS_PER_BATCH / original_ims_per_batch
print('Scale FPN rpn_proposals collect size in direct proportion to the change in IMS_PER_BATCH:\n'
' cfg.FPN.RPN_COLLECT_SCALE: {}'.format(cfg.FPN.RPN_COLLECT_SCALE))
if args.num_workers is not None:
cfg.DATA_LOADER.NUM_THREADS = args.num_workers
print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS)
### Overwrite some solver settings from command line arguments
if args.optimizer is not None:
cfg.SOLVER.TYPE = args.optimizer
if args.lr is not None:
cfg.SOLVER.BASE_LR = args.lr
if args.lr_decay_gamma is not None:
cfg.SOLVER.GAMMA = args.lr_decay_gamma
assert_and_infer_cfg()
timers = defaultdict(Timer)
### Dataset ###
timers['roidb'].tic()
roidb, ratio_list, ratio_index = combined_roidb_for_training(
cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
timers['roidb'].toc()
roidb_size = len(roidb)
logger.info('{:d} roidb entries'.format(roidb_size))
logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)
# Effective training sample size for one epoch
train_size = roidb_size // args.batch_size * args.batch_size
batchSampler = BatchSampler(
sampler=MinibatchSampler(ratio_list, ratio_index),
batch_size=args.batch_size,
drop_last=True
)
dataset = RoiDataLoader(
roidb,
cfg.MODEL.NUM_CLASSES,
training=True)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_sampler=batchSampler,
num_workers=cfg.DATA_LOADER.NUM_THREADS,
collate_fn=collate_minibatch)
dataiterator = iter(dataloader)
### Model ###
maskRCNN = Generalized_RCNN()
if cfg.CUDA:
maskRCNN.cuda()
### Optimizer ###
gn_param_nameset = set()
for name, module in maskRCNN.named_modules():
if isinstance(module, nn.GroupNorm):
gn_param_nameset.add(name+'.weight')
gn_param_nameset.add(name+'.bias')
gn_params = []
gn_param_names = []
bias_params = []
bias_param_names = []
nonbias_params = []
nonbias_param_names = []
nograd_param_names = []
for key, value in maskRCNN.named_parameters():
if value.requires_grad:
if 'bias' in key:
bias_params.append(value)
bias_param_names.append(key)
elif key in gn_param_nameset:
gn_params.append(value)
gn_param_names.append(key)
else:
nonbias_params.append(value)
nonbias_param_names.append(key)
else:
nograd_param_names.append(key)
assert (gn_param_nameset - set(nograd_param_names) - set(bias_param_names)) == set(gn_param_names)
# Learning rate of 0 is a dummy value to be set properly at the start of training
params = [
{'params': nonbias_params,
'lr': 0,
'weight_decay': cfg.SOLVER.WEIGHT_DECAY},
{'params': bias_params,
'lr': 0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1),
'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0},
{'params': gn_params,
'lr': 0,
'weight_decay': cfg.SOLVER.WEIGHT_DECAY_GN}
]
# names of parameters for each parameter group
param_names = [nonbias_param_names, bias_param_names, gn_param_names]
if cfg.SOLVER.TYPE == "SGD":
optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
elif cfg.SOLVER.TYPE == "Adam":
optimizer = torch.optim.Adam(params)
### Load checkpoint
if args.load_ckpt:
load_name = args.load_ckpt
logging.info("loading checkpoint %s", load_name)
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils.load_ckpt(maskRCNN, checkpoint['model'])
if args.resume:
args.start_step = checkpoint['step'] + 1
if 'train_size' in checkpoint: # For backward compatibility
if checkpoint['train_size'] != train_size:
print('train_size value: %d different from the one in checkpoint: %d'
% (train_size, checkpoint['train_size']))
# reorder the params in optimizer checkpoint's params_groups if needed
# misc_utils.ensure_optimizer_ckpt_params_order(param_names, checkpoint)
# There is a bug in optimizer.load_state_dict on Pytorch 0.3.1.
# However it's fixed on master.
optimizer.load_state_dict(checkpoint['optimizer'])
# misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])
del checkpoint
torch.cuda.empty_cache()
if args.load_detectron: #TODO resume for detectron weights (load sgd momentum values)
logging.info("loading Detectron weights %s", args.load_detectron)
load_detectron_weight(maskRCNN, args.load_detectron)
lr = optimizer.param_groups[0]['lr'] # lr of non-bias parameters, for command line outputs.
maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],
minibatch=True)
### Training Setups ###
args.run_name = misc_utils.get_run_name() + '_step'
output_dir = misc_utils.get_output_dir(args, args.run_name)
args.cfg_filename = os.path.basename(args.cfg_file)
if not args.no_save:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
blob = {'cfg': yaml.dump(cfg), 'args': args}
with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
if args.use_tfboard:
from tensorboardX import SummaryWriter
# Set the Tensorboard logger
tblogger = SummaryWriter(output_dir)
### Training Loop ###
maskRCNN.train()
CHECKPOINT_PERIOD = int(cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS)
# Set index for decay steps
decay_steps_ind = None
for i in range(1, len(cfg.SOLVER.STEPS)):
if cfg.SOLVER.STEPS[i] >= args.start_step:
decay_steps_ind = i
break
if decay_steps_ind is None:
decay_steps_ind = len(cfg.SOLVER.STEPS)
training_stats = TrainingStats(
args,
args.disp_interval,
tblogger if args.use_tfboard and not args.no_save else None)
try:
logger.info('Training starts !')
step = args.start_step
for step in range(args.start_step, cfg.SOLVER.MAX_ITER):
# Warm up
if step < cfg.SOLVER.WARM_UP_ITERS:
method = cfg.SOLVER.WARM_UP_METHOD
if method == 'constant':
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
elif method == 'linear':
alpha = step / cfg.SOLVER.WARM_UP_ITERS
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha
else:
raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
lr_new = cfg.SOLVER.BASE_LR * warmup_factor
net_utils.update_learning_rate(optimizer, lr, lr_new)
lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
elif step == cfg.SOLVER.WARM_UP_ITERS:
net_utils.update_learning_rate(optimizer, lr, cfg.SOLVER.BASE_LR)
lr = optimizer.param_groups[0]['lr']
assert lr == cfg.SOLVER.BASE_LR
# Learning rate decay
if decay_steps_ind < len(cfg.SOLVER.STEPS) and \
step == cfg.SOLVER.STEPS[decay_steps_ind]:
logger.info('Decay the learning rate on step %d', step)
lr_new = lr * cfg.SOLVER.GAMMA
net_utils.update_learning_rate(optimizer, lr, lr_new)
lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
decay_steps_ind += 1
training_stats.IterTic()
optimizer.zero_grad()
for inner_iter in range(args.iter_size):
try:
input_data = next(dataiterator)
except StopIteration:
dataiterator = iter(dataloader)
input_data = next(dataiterator)
for key in input_data:
if key != 'roidb': # roidb is a list of ndarrays with inconsistent length
input_data[key] = list(map(Variable, input_data[key]))
try:
net_outputs = maskRCNN(**input_data)
except Exception:  # skip batches that fail in the forward pass
continue
training_stats.UpdateIterStats(net_outputs, inner_iter)
loss = net_outputs['total_loss']
loss.backward()
optimizer.step()
training_stats.IterToc()
training_stats.LogIterStats(step, lr)
if (step+1) % CHECKPOINT_PERIOD == 0:
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
# ---- Training ends ----
# Save last checkpoint
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
except (RuntimeError, KeyboardInterrupt):
del dataiterator
logger.info('Save ckpt on exception ...')
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
logger.info('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace)
finally:
if args.use_tfboard and not args.no_save:
tblogger.close()
if __name__ == '__main__':
main()
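# A minimal usage sketch for the argument parser defined above; the dataset name, config
# path, and batch size are illustrative assumptions rather than project defaults.
import sys
sys.argv = ['train_net_step.py', '--dataset', 'coco2017',
            '--cfg', 'configs/example_cfg.yaml', '--bs', '8', '--nw', '4']
args = parse_args()
print(args.dataset, args.batch_size, args.num_workers)  # -> coco2017 8 4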
| 38.435518
| 107
| 0.642079
| 2,339
| 18,180
| 4.766139
| 0.184694
| 0.030678
| 0.027449
| 0.009419
| 0.22892
| 0.156261
| 0.105938
| 0.080911
| 0.074901
| 0.044582
| 0
| 0.006569
| 0.25473
| 18,180
| 472
| 108
| 38.516949
| 0.816223
| 0.101815
| 0
| 0.14011
| 0
| 0
| 0.155984
| 0.005793
| 0
| 0
| 0
| 0.002119
| 0.019231
| 1
| 0.008242
| false
| 0
| 0.074176
| 0
| 0.087912
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a9772419f2ef3e57950559b990b8ce8968146c1
| 4,402
|
py
|
Python
|
Lib/site-packages/astroid/brain/brain_numpy_core_multiarray.py
|
punithmadaiahkumar/try-django
|
39680a7583122bdd722789f92400edae67c6251d
|
[
"MIT"
] | 4
|
2021-10-20T12:39:09.000Z
|
2022-02-26T15:02:08.000Z
|
Lib/site-packages/astroid/brain/brain_numpy_core_multiarray.py
|
punithmadaiahkumar/try-django
|
39680a7583122bdd722789f92400edae67c6251d
|
[
"MIT"
] | 12
|
2020-07-05T14:30:46.000Z
|
2020-08-06T21:06:00.000Z
|
Lib/site-packages/astroid/brain/brain_numpy_core_multiarray.py
|
punithmadaiahkumar/try-django
|
39680a7583122bdd722789f92400edae67c6251d
|
[
"MIT"
] | 1
|
2021-10-20T13:47:10.000Z
|
2021-10-20T13:47:10.000Z
|
# Copyright (c) 2019-2020 hippo91 <[email protected]>
# Copyright (c) 2020 Claudiu Popa <[email protected]>
# Copyright (c) 2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
"""Astroid hooks for numpy.core.multiarray module."""
import functools
from astroid.brain.brain_numpy_utils import infer_numpy_member, looks_like_numpy_member
from astroid.brain.helpers import register_module_extender
from astroid.builder import parse
from astroid.inference_tip import inference_tip
from astroid.manager import AstroidManager
from astroid.nodes.node_classes import Attribute, Name
def numpy_core_multiarray_transform():
return parse(
"""
# different functions defined in multiarray.py
def inner(a, b):
return numpy.ndarray([0, 0])
def vdot(a, b):
return numpy.ndarray([0, 0])
"""
)
register_module_extender(
AstroidManager(), "numpy.core.multiarray", numpy_core_multiarray_transform
)
METHODS_TO_BE_INFERRED = {
"array": """def array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0):
return numpy.ndarray([0, 0])""",
"dot": """def dot(a, b, out=None):
return numpy.ndarray([0, 0])""",
"empty_like": """def empty_like(a, dtype=None, order='K', subok=True):
return numpy.ndarray((0, 0))""",
"concatenate": """def concatenate(arrays, axis=None, out=None):
return numpy.ndarray((0, 0))""",
"where": """def where(condition, x=None, y=None):
return numpy.ndarray([0, 0])""",
"empty": """def empty(shape, dtype=float, order='C'):
return numpy.ndarray([0, 0])""",
"bincount": """def bincount(x, weights=None, minlength=0):
return numpy.ndarray([0, 0])""",
"busday_count": """def busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None):
return numpy.ndarray([0, 0])""",
"busday_offset": """def busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None):
return numpy.ndarray([0, 0])""",
"can_cast": """def can_cast(from_, to, casting='safe'):
return True""",
"copyto": """def copyto(dst, src, casting='same_kind', where=True):
return None""",
"datetime_as_string": """def datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind'):
return numpy.ndarray([0, 0])""",
"is_busday": """def is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None):
return numpy.ndarray([0, 0])""",
"lexsort": """def lexsort(keys, axis=-1):
return numpy.ndarray([0, 0])""",
"may_share_memory": """def may_share_memory(a, b, max_work=None):
return True""",
# Not yet available because dtype is not yet present in those brains
# "min_scalar_type": """def min_scalar_type(a):
# return numpy.dtype('int16')""",
"packbits": """def packbits(a, axis=None, bitorder='big'):
return numpy.ndarray([0, 0])""",
# Not yet available because dtype is not yet present in those brains
# "result_type": """def result_type(*arrays_and_dtypes):
# return numpy.dtype('int16')""",
"shares_memory": """def shares_memory(a, b, max_work=None):
return True""",
"unpackbits": """def unpackbits(a, axis=None, count=None, bitorder='big'):
return numpy.ndarray([0, 0])""",
"unravel_index": """def unravel_index(indices, shape, order='C'):
return (numpy.ndarray([0, 0]),)""",
"zeros": """def zeros(shape, dtype=float, order='C'):
return numpy.ndarray([0, 0])""",
}
for method_name, function_src in METHODS_TO_BE_INFERRED.items():
inference_function = functools.partial(infer_numpy_member, function_src)
AstroidManager().register_transform(
Attribute,
inference_tip(inference_function),
functools.partial(looks_like_numpy_member, method_name),
)
AstroidManager().register_transform(
Name,
inference_tip(inference_function),
functools.partial(looks_like_numpy_member, method_name),
)
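# A minimal sketch of the effect of these transforms; extract_node and the #@ marker are
# standard astroid test helpers, and the inference result noted below is only the expected
# outcome when numpy is importable in the environment.
import astroid
call = astroid.extract_node(
    """
    import numpy
    numpy.array([1, 2, 3])  #@
    """
)
print(next(call.infer()))  # should report an inferred numpy.ndarray instance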
| 43.584158
| 133
| 0.647887
| 560
| 4,402
| 4.948214
| 0.319643
| 0.079394
| 0.116925
| 0.123421
| 0.347889
| 0.310357
| 0.29087
| 0.243594
| 0.186214
| 0.171779
| 0
| 0.027301
| 0.192867
| 4,402
| 100
| 134
| 44.02
| 0.752603
| 0.178328
| 0
| 0.358209
| 0
| 0.059701
| 0.594792
| 0.032475
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014925
| false
| 0
| 0.104478
| 0.014925
| 0.432836
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a98e4f650bd93b816382fc9c8f7255712fc94e9
| 1,044
|
py
|
Python
|
jp.atcoder/abc056/arc070_b/26725094.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc056/arc070_b/26725094.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc056/arc070_b/26725094.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import sys
import typing
import numpy as np
def solve(a: np.ndarray, k: int) -> typing.NoReturn:
n = len(a)
def compute_dp(a: np.ndarray) -> np.ndarray:
dp = np.zeros((n + 1, k), np.bool8)
dp[0, 0] = True
for i in range(n):
dp[i + 1] = dp[i].copy()
dp[i + 1, a[i] :] |= dp[i, : -a[i]]
return dp
dp_l = compute_dp(a)
dp_r = compute_dp(a[::-1])[::-1]
dp_r = dp_r.astype(np.int64).cumsum(axis=1)
cnt = 0
for p in range(n):
l, r = dp_l[p], dp_r[n - p]
x = a[p]
for i in np.flatnonzero(l).tolist():
if (
not r[k - i - 1]
- (0 if k - x - i - 1 < 0 else r[k - x - i - 1])
>= 1
):
continue
cnt += 1
break
print(n - cnt)
def main() -> typing.NoReturn:
n, k = map(int, input().split())
a = np.array(sys.stdin.readline().split(), dtype=np.int64)
solve(a, k)
main()
| 23.2
| 65
| 0.425287
| 161
| 1,044
| 2.701863
| 0.329193
| 0.022989
| 0.068966
| 0.018391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034146
| 0.41092
| 1,044
| 44
| 66
| 23.727273
| 0.673171
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0
| 0.088235
| 0
| 0.205882
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a9ca97f3e91b994b2c90fedaf1ef527a056c57a
| 891
|
py
|
Python
|
app/celery.py
|
TIHLDE/Lepton
|
60ec0793381f1c1b222f305586e8c2d4345fb566
|
[
"MIT"
] | 7
|
2021-03-04T18:49:12.000Z
|
2021-03-08T18:25:51.000Z
|
app/celery.py
|
TIHLDE/Lepton
|
60ec0793381f1c1b222f305586e8c2d4345fb566
|
[
"MIT"
] | 251
|
2021-03-04T19:19:14.000Z
|
2022-03-31T14:47:53.000Z
|
app/celery.py
|
tihlde/Lepton
|
5cab3522c421b76373a5c25f49267cfaef7b826a
|
[
"MIT"
] | 3
|
2021-10-05T19:03:04.000Z
|
2022-02-25T13:32:09.000Z
|
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
app = Celery("app")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
app.conf.update(
task_serializer="json",
accept_content=["json"], # Ignore other content
result_serializer="json",
timezone="Europe/Oslo",
enable_utc=True,
)
@app.task(bind=True)
def debug_task(self):
print("Request: {0!r}".format(self.request))
| 27
| 66
| 0.751964
| 122
| 891
| 5.352459
| 0.590164
| 0.042879
| 0.061256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001305
| 0.140292
| 891
| 32
| 67
| 27.84375
| 0.851175
| 0.392817
| 0
| 0
| 0
| 0
| 0.187617
| 0.041276
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.176471
| 0
| 0.235294
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a9ed4d324eb619f1707025aa2d1ca6c25ef2609
| 17,230
|
py
|
Python
|
src/finn/util/basic.py
|
quetric/finn-base-1
|
1494a13a430c784683c2c33288823f83d1cd6fed
|
[
"BSD-3-Clause"
] | null | null | null |
src/finn/util/basic.py
|
quetric/finn-base-1
|
1494a13a430c784683c2c33288823f83d1cd6fed
|
[
"BSD-3-Clause"
] | null | null | null |
src/finn/util/basic.py
|
quetric/finn-base-1
|
1494a13a430c784683c2c33288823f83d1cd6fed
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020 Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Xilinx nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import random
import string
import subprocess
import tempfile
import warnings
from finn.core.datatype import DataType
# mapping from PYNQ board names to FPGA part names
pynq_part_map = dict()
pynq_part_map["Ultra96"] = "xczu3eg-sbva484-1-e"
pynq_part_map["Pynq-Z1"] = "xc7z020clg400-1"
pynq_part_map["Pynq-Z2"] = "xc7z020clg400-1"
pynq_part_map["ZCU102"] = "xczu9eg-ffvb1156-2-e"
pynq_part_map["ZCU104"] = "xczu7ev-ffvc1156-2-e"
# native AXI HP port width (in bits) for PYNQ boards
pynq_native_port_width = dict()
pynq_native_port_width["Pynq-Z1"] = 64
pynq_native_port_width["Pynq-Z2"] = 64
pynq_native_port_width["Ultra96"] = 128
pynq_native_port_width["ZCU102"] = 128
pynq_native_port_width["ZCU104"] = 128
# Alveo device and platform mappings
alveo_part_map = dict()
alveo_part_map["U50"] = "xcu50-fsvh2104-2L-e"
alveo_part_map["U200"] = "xcu200-fsgd2104-2-e"
alveo_part_map["U250"] = "xcu250-figd2104-2L-e"
alveo_part_map["U280"] = "xcu280-fsvh2892-2L-e"
alveo_default_platform = dict()
alveo_default_platform["U50"] = "xilinx_u50_gen3x16_xdma_201920_3"
alveo_default_platform["U200"] = "xilinx_u200_xdma_201830_2"
alveo_default_platform["U250"] = "xilinx_u250_xdma_201830_2"
alveo_default_platform["U280"] = "xilinx_u280_xdma_201920_3"
def get_rtlsim_trace_depth():
"""Return the trace depth for rtlsim via PyVerilator. Controllable
via the RTLSIM_TRACE_DEPTH environment variable. If the env.var. is
undefined, the default value of 1 is returned. A trace depth of 1
will only show top-level signals and yield smaller .vcd files.
The following depth values are of interest for whole-network stitched IP
rtlsim:
- level 1 shows top-level input/output streams
- level 2 shows per-layer input/output streams
- level 3 shows per full-layer I/O including FIFO count signals
"""
try:
return int(os.environ["RTLSIM_TRACE_DEPTH"])
except KeyError:
return 1
def get_remote_vivado():
"""Return the address of the remote Vivado synthesis server as set by the,
REMOTE_VIVADO environment variable, otherwise return None"""
try:
return os.environ["REMOTE_VIVADO"]
except KeyError:
return None
def get_num_default_workers():
"""Return the number of workers for parallel transformations. Controllable
via the NUM_DEFAULT_WORKERS environment variable. If the env.var. is
undefined, the default value of 1 is returned.
"""
try:
return int(os.environ["NUM_DEFAULT_WORKERS"])
except KeyError:
return 1
def get_finn_root():
"Return the root directory that FINN is cloned into."
try:
return os.environ["FINN_ROOT"]
except KeyError:
raise Exception(
"""Environment variable FINN_ROOT must be set
correctly. Please ensure you have launched the Docker container correctly.
"""
)
def get_execution_error_thresh():
"Return the max error that is allowed for rounding in FINN execution."
try:
return float(os.environ["ERROR_THRESH"])
except KeyError:
return 1e-2
def get_sanitize_quant_tensors():
"""Return whether tensors with quantization annotations should be sanitized.
Enabled by default, disabling will yield faster ONNX execution but may give
incorrect results. Use with caution."""
try:
return int(os.environ["SANITIZE_QUANT_TENSORS"])
except KeyError:
# enabled by default
return 1
def make_build_dir(prefix=""):
"""Creates a temporary folder with given prefix to be used as a build dir.
Use this function instead of tempfile.mkdtemp to ensure any generated files
will survive on the host after the FINN Docker container exits."""
try:
inst_prefix = os.environ["FINN_INST_NAME"] + "/"
return tempfile.mkdtemp(prefix=inst_prefix + prefix)
except KeyError:
raise Exception(
"""Environment variable FINN_INST_NAME must be set
        correctly. Please ensure you have launched the Docker container correctly.
"""
)
def get_by_name(container, name, name_field="name"):
"""Return item from container by .name field if it exists, None otherwise.
Will throw an Exception if multiple items are found, since this violates the
ONNX standard."""
names = [getattr(x, name_field) for x in container]
inds = [i for i, e in enumerate(names) if e == name]
if len(inds) > 1:
raise Exception("Found multiple get_by_name matches, undefined behavior")
elif len(inds) == 0:
return None
else:
ind = inds[0]
return container[ind]
def remove_by_name(container, name, name_field="name"):
"""Remove item from container by .name field if it exists."""
item = get_by_name(container, name, name_field)
if item is not None:
container.remove(item)
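# Illustrative usage sketch (added for clarity; not part of the original module).
# get_by_name works on any container of objects exposing a .name attribute,
# e.g. ONNX node/attribute protobuf lists:
#   from collections import namedtuple
#   Item = namedtuple("Item", ["name"])
#   get_by_name([Item("a"), Item("b")], "b")   # -> Item(name='b')
#   get_by_name([Item("a"), Item("b")], "c")   # -> None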
def random_string(stringLength=6):
"""Randomly generate a string of letters and digits."""
lettersAndDigits = string.ascii_letters + string.digits
return "".join(random.choice(lettersAndDigits) for i in range(stringLength))
def interleave_matrix_outer_dim_from_partitions(matrix, n_partitions):
"""Interleave the outermost dimension of a matrix from given
partitions (n_partitions)."""
if type(matrix) != np.ndarray or matrix.dtype != np.float32:
# try to convert to a float numpy array (container dtype is float)
matrix = np.asarray(matrix, dtype=np.float32)
shp = matrix.shape
ndim = matrix.ndim
# ensure # partitions evenly divide the outermost dimension
assert (
shp[0] % n_partitions == 0
), """The outermost dimension is not divisable
by the number of partitions."""
# only tested for matrices
assert (
ndim == 2
), """The dimension of the matrix is not 2. Currently this function
only works for matrices."""
# interleave rows between PEs using reshape + transpose
matrix_r = matrix.reshape(-1, n_partitions, shp[1]).transpose((1, 0, 2))
matrix_r = matrix_r.reshape(n_partitions, -1, shp[1])
return matrix_r
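# Illustrative usage sketch (added for clarity; not part of the original module).
# With 4 rows and 2 partitions, rows are distributed round-robin across partitions:
#   m = np.arange(12).reshape(4, 3)
#   interleave_matrix_outer_dim_from_partitions(m, 2).shape   # -> (2, 2, 3)
#   # partition 0 holds original rows 0 and 2, partition 1 holds rows 1 and 3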
def roundup_to_integer_multiple(x, factor):
"""Round up integer x to the nearest integer multiple of integer factor.
Returns x if factor is set to -1. Both x and factor must otherwise be
positive."""
# ensure integers
assert int(x) == x, "The input x is not an integer."
assert int(factor) == factor, "The input factor is not an integer."
# use -1 to indicate no padding needed
if factor == -1:
return x
# ensure positive values
    assert factor > 0 and x > 0, "Factor and x must both be positive."
if x < factor:
return factor
else:
if x % factor == 0:
return x
else:
return x + (factor - (x % factor))
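# Illustrative usage sketch (added for clarity; not part of the original module):
#   roundup_to_integer_multiple(10, 4)    # -> 12
#   roundup_to_integer_multiple(3, 4)     # -> 4 (x < factor returns factor)
#   roundup_to_integer_multiple(10, -1)   # -> 10 (factor -1 means no padding)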
def pad_tensor_to_multiple_of(ndarray, pad_to_dims, val=0, distr_pad=False):
"""Pad each dimension of given NumPy ndarray using val, so that each
dimension is a multiple of the respective value in pad_to_dims. -1 means
do not pad that particular dimension. If distr_pad is False, all padding
will be inserted after the existing values; otherwise it will be split
evenly between before and after the existing values, with one extra value
inserted after if the padding amount is not divisible by two."""
if type(ndarray) != np.ndarray or ndarray.dtype != np.float32:
# try to convert to a float numpy array (container dtype is float)
ndarray = np.asarray(ndarray, dtype=np.float32)
assert ndarray.ndim == len(
pad_to_dims
), """The dimensions of the input
array don't match the length of the pad_to_dims value."""
# compute the desired shape
desired = zip(list(ndarray.shape), list(pad_to_dims))
desired = map(lambda x: roundup_to_integer_multiple(x[0], x[1]), desired)
desired = np.asarray(list(desired), dtype=np.int32)
current = np.asarray(ndarray.shape, dtype=np.int32)
pad_amt = desired - current
# add padding to get to the desired shape
if distr_pad:
pad_before = (pad_amt // 2).astype(np.int32)
pad_after = pad_amt - pad_before
pad_amt = list(zip(pad_before, pad_after))
else:
# all padding is added after the existing values
pad_amt = list(map(lambda x: (0, x), pad_amt))
ret = np.pad(ndarray, pad_amt, mode="constant", constant_values=val)
assert (
np.asarray(ret.shape, dtype=np.int32) == desired
).all(), """The
calculated output array doesn't match the desired/expected one."""
return ret
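# Illustrative usage sketch (added for clarity; not part of the original module):
#   t = np.ones((3, 3))
#   pad_tensor_to_multiple_of(t, [4, -1]).shape                  # -> (4, 3), zeros appended after
#   pad_tensor_to_multiple_of(t, [4, 4], distr_pad=True).shape   # -> (4, 4), padding split before/after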
def calculate_matvec_accumulator_range(matrix, vec_dt):
"""Calculate the minimum and maximum possible result (accumulator) values
for a dot product x * A, given matrix A of dims (MW, MH), and vector (1, MW)
with datatype vec_dt. Returns (acc_min, acc_max).
"""
min_weight = matrix.min()
max_weight = matrix.max()
perceptive_field_elems = matrix.shape[0]
min_input = vec_dt.min()
max_input = vec_dt.max()
# calculate minimum and maximum values of accumulator
# assume inputs span the whole range of the input datatype
acc_min = perceptive_field_elems * min(
min_weight * max_input,
min_weight * min_input,
max_weight * max_input,
max_weight * min_input,
)
acc_max = perceptive_field_elems * max(
min_weight * max_input,
min_weight * min_input,
max_weight * max_input,
max_weight * min_input,
)
return (acc_min, acc_max)
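# Illustrative usage sketch (added for clarity; not part of the original module;
# assumes DataType.INT2 from FINN's DataType enum, which spans [-2, 1]):
#   W = np.asarray([[1.0, -1.0], [2.0, 0.0]])            # shape (MW, MH) = (2, 2)
#   calculate_matvec_accumulator_range(W, DataType.INT2)  # -> (-8.0, 4.0)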
def gen_finn_dt_tensor(finn_dt, tensor_shape):
"""Generates random tensor in given shape and with given FINN DataType."""
if type(tensor_shape) == list:
tensor_shape = tuple(tensor_shape)
if finn_dt == DataType.BIPOLAR:
tensor_values = np.random.randint(2, size=tensor_shape)
tensor_values = 2 * tensor_values - 1
elif finn_dt == DataType.BINARY:
tensor_values = np.random.randint(2, size=tensor_shape)
elif "INT" in finn_dt.name or finn_dt == DataType.TERNARY:
tensor_values = np.random.randint(
finn_dt.min(), high=finn_dt.max() + 1, size=tensor_shape
)
else:
raise ValueError(
"Datatype {} is not supported, no tensor could be generated".format(finn_dt)
)
# always use float type as container
return tensor_values.astype(np.float32)
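# Illustrative usage sketch (added for clarity; not part of the original module):
#   gen_finn_dt_tensor(DataType.BIPOLAR, [2, 2])        # values in {-1, +1}, float32 container
#   gen_finn_dt_tensor(DataType.INT4, (1, 8)).dtype     # -> dtype('float32')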
def calculate_signed_dot_prod_range(dt_a, dt_b, len):
"""Returns the (min,max) values a dot product between two signed vectors of
types dt_a and dt_b of len elements can take."""
assert (
dt_a.signed() and dt_b.signed()
), """The input values are not both
signed vectors."""
min_prod = 2 ** 30
max_prod = -(2 ** 30)
for a_val in [dt_a.min(), dt_a.max()]:
for b_val in [dt_b.min(), dt_b.max()]:
prod = a_val * b_val * len
if prod < min_prod:
min_prod = prod
if prod > max_prod:
max_prod = prod
return (min_prod, max_prod)
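# Illustrative usage sketch (added for clarity; not part of the original module):
#   calculate_signed_dot_prod_range(DataType.INT2, DataType.INT2, 4)
#   # INT2 spans [-2, 1], so the extreme products are (-2)*(-2)=4 and (-2)*1=-2,
#   # giving (min_prod, max_prod) = (-8, 16) for 4 elements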
def sanitize_quant_values(model, node_tensors, execution_context, check_values=False):
"""Sanitize given list of tensors in execution_context by rounding values
that are supposed to be integers (as indicated by their quantization
    annotation). Will raise an Exception if the amount of rounding is too large.
Returns the sanitized execution context.
If check_values is specified, an extra DataType.allowed() check will be
performed on any rounded tensors.
Background:
FINN uses floating point tensors as a carrier data type to represent
integers. Floating point arithmetic can introduce rounding errors, e.g.
(int_num * float_scale) / float_scale is not always equal to int_num.
We use this function to ensure that the values that are supposed to be
integers are indeed integers.
"""
for tensor in node_tensors:
dtype = model.get_tensor_datatype(tensor)
# floats don't need sanitization, skip to next
        # skipping them also avoids unnecessary runtime overhead
if dtype == DataType.FLOAT32:
continue
current_values = execution_context[tensor]
updated_values = current_values
has_to_be_rounded = False
# TODO: vectorize with numpy
for value in np.nditer(current_values):
if not dtype.allowed(value):
has_to_be_rounded = True
break
if has_to_be_rounded:
updated_values = np.round(current_values)
warnings.warn(
"The values of tensor {} can't be represented "
"with the set FINN datatype ({}), they will be rounded to match the "
"FINN datatype.".format(tensor, dtype)
)
# check if rounded values are not too far from original values
max_error = max(np.abs(current_values - updated_values).flatten())
if max_error <= get_execution_error_thresh():
if check_values is True:
# check again if values can now be represented with set finn datatype
# TODO: vectorize with numpy
for value in np.nditer(updated_values):
if not dtype.allowed(value):
raise Exception(
"""Values can't be represented with set
finn datatype ({}) for input {}""".format(
dtype, tensor
)
)
execution_context[tensor] = updated_values
else:
raise Exception(
"""Rounding error is too high to match set FINN
datatype ({}) for input {}""".format(
dtype, tensor
)
)
return execution_context
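# Illustrative numerical sketch of the rounding issue described in the docstring
# above (added for clarity; not part of the original module):
#   scale = 0.1
#   (3 * scale) / scale          # may evaluate to 3.0000000000000004 rather than exactly 3.0
#   np.round((3 * scale) / scale)  # -> 3.0, which is what sanitize_quant_values relies on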
class CppBuilder:
"""Builds the g++ compiler command to produces the executable of the c++ code
in code_gen_dir which is passed to the function build() of this class."""
def __init__(self):
self.include_paths = []
self.cpp_files = []
self.executable_path = ""
self.code_gen_dir = ""
self.compile_components = []
self.compile_script = ""
def append_includes(self, library_path):
"""Adds given library path to include_paths list."""
self.include_paths.append(library_path)
def append_sources(self, cpp_file):
"""Adds given c++ file to cpp_files list."""
self.cpp_files.append(cpp_file)
def set_executable_path(self, path):
"""Sets member variable "executable_path" to given path."""
self.executable_path = path
def build(self, code_gen_dir):
"""Builds the g++ compiler command according to entries in include_paths
and cpp_files lists. Saves it in bash script in given folder and
executes it."""
# raise error if includes are empty
self.code_gen_dir = code_gen_dir
self.compile_components.append("g++ -o " + str(self.executable_path))
for cpp_file in self.cpp_files:
self.compile_components.append(cpp_file)
for lib in self.include_paths:
self.compile_components.append(lib)
bash_compile = ""
for component in self.compile_components:
bash_compile += str(component) + " "
self.compile_script = str(self.code_gen_dir) + "/compile.sh"
with open(self.compile_script, "w") as f:
f.write("#!/bin/bash \n")
f.write(bash_compile + "\n")
bash_command = ["bash", self.compile_script]
process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
process_compile.communicate()
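# Illustrative usage sketch (added for clarity; not part of the original module;
# all paths below are hypothetical placeholders):
#   builder = CppBuilder()
#   builder.append_includes("-I/path/to/headers")
#   builder.append_sources("/path/to/top.cpp")
#   builder.set_executable_path("/path/to/node_model")
#   builder.build("/path/to/code_gen_dir")   # writes compile.sh there and runs it via bash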
| 38.9819
| 88
| 0.673593
| 2,388
| 17,230
| 4.710637
| 0.239112
| 0.006845
| 0.005867
| 0.010134
| 0.185617
| 0.138857
| 0.113699
| 0.090319
| 0.083207
| 0.061694
| 0
| 0.018194
| 0.243993
| 17,230
| 441
| 89
| 39.070295
| 0.845386
| 0.365061
| 0
| 0.196911
| 0
| 0
| 0.131913
| 0.012605
| 0
| 0
| 0
| 0.002268
| 0.030888
| 1
| 0.084942
| false
| 0
| 0.030888
| 0
| 0.216216
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a9f22dd58e0b2b2c094a6f1cf7277e84b5b669b
| 9,455
|
py
|
Python
|
mainmenu.py
|
jeffrypaul37/Hospital-Management-System
|
4ff08bed5387ca23e3f31dbbf46e625d8ae5807b
|
[
"Apache-2.0"
] | null | null | null |
mainmenu.py
|
jeffrypaul37/Hospital-Management-System
|
4ff08bed5387ca23e3f31dbbf46e625d8ae5807b
|
[
"Apache-2.0"
] | null | null | null |
mainmenu.py
|
jeffrypaul37/Hospital-Management-System
|
4ff08bed5387ca23e3f31dbbf46e625d8ae5807b
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import *
from tkcalendar import Calendar
from datetime import datetime
from datetime import date
import tkinter as tk
from tkinter import ttk
from tkinter.messagebox import askyesno
import re
import sqlite3
import tkinter.messagebox
import pandas as pd
import datetime
from dateutil import rrule, parser
today = date.today()
date1 = '05-10-2021'
date2 = '12-31-2050'
datesx = pd.date_range(today, date2).tolist()
conn = sqlite3.connect('database copy.db')
c = conn.cursor()
ids = []
class Application:
def __init__(self, master):
self.master = master
self.left = Frame(master, width=1000, height=800, bg='sea green')
self.left.pack(side=LEFT)
self.right = Frame(master, width=1000, height=800, bg='steelblue')
self.right.pack(side=RIGHT)
self.heading = Label(self.left, text="Appointments", font=('arial 40 bold'), fg='black', bg='sea green')
self.heading.place(x=0, y=0)
self.name = Label(self.left, text="Patient's Name", font=('arial 18 bold'), fg='black', bg='sea green')
self.name.place(x=0, y=100)
self.age = Label(self.left, text="Age", font=('arial 18 bold'), fg='black', bg='sea green')
self.age.place(x=0, y=140)
self.gender = Label(self.left, text="Gender", font=('arial 18 bold'), fg='black', bg='sea green')
self.gender.place(x=0, y=180)
self.location = Label(self.left, text="Location", font=('arial 18 bold'), fg='black', bg='sea green')
self.location.place(x=0, y=220)
self.date = Label(self.left, text="Appointment Date", font=('arial 18 bold'), fg='black', bg='sea green')
self.date.place(x=0, y=260)
self.time = Label(self.left, text="Appointment Time", font=('arial 18 bold'), fg='black', bg='sea green')
self.time.place(x=0, y=300)
self.phone = Label(self.left, text="Phone Number", font=('arial 18 bold'), fg='black', bg='sea green')
self.phone.place(x=0, y=340)
self.allergies = Label(self.left, text="Allergies", font=('arial 18 bold'), fg='black', bg='sea green')
self.allergies.place(x=0, y=380)
self.all_ent = Entry(self.left, width=30)
self.all_ent.place(x=250, y=380)
self.all_ent.insert(0, 'NONE')
self.chronic = Label(self.left, text="Chronic Conditions", font=('arial 18 bold'), fg='black', bg='sea green')
self.chronic.place(x=0, y=420)
self.chr_ent = Entry(self.left, width=30)
self.chr_ent.place(x=250, y=420)
self.chr_ent.insert(0, 'NONE')
self.bg = Label(self.left, text="Blood Group", font=('arial 18 bold'), fg='black', bg='sea green')
self.bg.place(x=0, y=460)
self.clicked3=StringVar()
self.clicked3.set("Select Blood Group")
self.bg_ent = OptionMenu(self.left,self.clicked3,*options3)
self.bg_ent.pack()
self.bg_ent.place(x=250, y=460)
self.name_ent = Entry(self.left, width=30)
self.name_ent.place(x=250, y=100)
self.age_ent = Entry(self.left, width=30)
self.age_ent.place(x=250, y=140)
self.clicked=StringVar()
self.clicked.set("Male")
self.gender_ent = OptionMenu(self.left,self.clicked,*options)
self.gender_ent.pack()
self.gender_ent.place(x=250, y=180)
self.location_ent=Entry(self.left,width=30)
self.location_ent.place(x=250, y=220)
self.clicked1=StringVar()
self.clicked1.set("Select Date")
self.date_ent = OptionMenu(self.left,self.clicked1,*options1)
self.date_ent.pack()
self.date_ent.place(x=250, y=260)
self.clicked2=StringVar()
self.clicked2.set("Select Time")
self.time_ent = OptionMenu(self.left,self.clicked2,*options2)
self.time_ent.pack()
self.time_ent.place(x=250, y=300)
self.phone_ent = Entry(self.left, width=30)
self.phone_ent.place(x=250, y=340)
self.submit = Button(self.left, text="Add Appointment", width=20, height=2, bg='steelblue', command=self.add_appointment)
self.submit.place(x=270, y=500)
self.submit = Button(self.left, text="View Appointments", width=20, height=2, bg='steelblue', command=self.view)
self.submit.place(x=600, y=100)
self.submit = Button(self.left, text="View/Update Patient Details", width=20, height=2, bg='steelblue', command=self.update)
self.submit.place(x=600, y=200)
self.submit = Button(self.left, text="Read Names", width=20, height=2, bg='steelblue', command=self.read)
self.submit.place(x=600, y=300)
self.submit = Button(self.left, text="Exit", width=20, height=2, bg='steelblue', command=self.quit)
self.submit.place(x=600, y=400)
sql2 = "SELECT ID FROM appointments"
self.result = c.execute(sql2)
for self.row in self.result:
self.id = self.row[0]
ids.append(self.id)
self.new = sorted(ids)
self.final_id = self.new[len(ids)-1]
self.logs = Label(self.right, text="Logs", font=('arial 28 bold'), fg='white', bg='steelblue')
self.logs.place(x=0, y=0)
self.box = Text(self.right, width=62, height=45)
self.box.place(x=20, y=60)
def add_appointment(self):
self.val1 = self.name_ent.get()
self.val2 = self.age_ent.get()
self.val3 = self.clicked.get()
self.val4 = self.location_ent.get()
self.val5 = self.clicked1.get()
self.val6 = self.clicked2.get()
self.val7 = self.phone_ent.get()
self.val8 = self.all_ent.get()
self.val9 = self.chr_ent.get()
self.val10 = self.clicked3.get()
pattern=re.compile("[7-9][0-9]{9}")
pattern=re.compile("[7-9][0-9]{9}")
pattern2=re.compile("[1-9]([0-9])*")
pattern1=re.compile(r'([A-Z])(\s*[A-Z])*$')
pattern.match(self.val7)
if self.val1 == '' or self.val2 == '' or self.val3 == '' or self.val4 == '' or self.val5 == '' or self.val6=='' or self.val7=='' or self.val10=='Select Blood Group' or self.val5=='Select Date' or self.val6=='Select Time':
print("ty",self.val3)
tkinter.messagebox.showinfo("Warning", "Please Fill Up All Boxes")
print(self.val3)
elif not(pattern1.match(self.val1)) or len(self.val1)<2:
tkinter.messagebox.showinfo("Warning","INVALID Name")
elif not(pattern2.match(self.val2)) or len(self.val2)>=3:
tkinter.messagebox.showinfo("Warning","INVALID Age")
elif not(pattern.match(self.val7)) or len(self.val7)>10:
tkinter.messagebox.showinfo("Warning", "INVALID Phone Number")
else:
sql = "INSERT INTO 'appointments' (name, age, gender, location, scheduled_time, phone,date,Allergies,Chronic_Conditions,Blood_Group) VALUES(?, ?, ?, ?, ?, ?,?,?,?,?)"
c.execute(sql, (self.val1, self.val2, self.val3, self.val4, self.val6, self.val7,self.val5,self.val8,self.val9,self.val10))
conn.commit()
tkinter.messagebox.showinfo("Success", "Appointment for " + str(self.val1) + " has been created" )
self.box.insert(END, '\n Appointment fixed for ' + str(self.val1) + '\n at ' + str(self.val5)+','+str(self.val6))
self.name_ent.delete(0,END)
self.age_ent.delete(0,END)
self.location_ent.delete(0,END)
self.phone_ent.delete(0,END)
self.clicked1.set("Select Date")
self.clicked2.set("Select Time")
self.clicked3.set("Select Blood Group")
self.chr_ent.delete(0,END)
self.all_ent.delete(0,END)
self.all_ent.insert(0, 'NONE')
self.chr_ent.insert(0, 'NONE')
def view(self):
import view
view.call()
def update(self):
import update
update.buildupdate()
def read(self):
import read
read.buildread()
def quit(self):
answer = askyesno(title='Confirm Exit', message='Are you sure you want to exit?')
if answer:
root.destroy()
root = Tk()
root.title("Shalom Clinic")
#root.geometry("1200x720+0+0")
root.attributes('-fullscreen', True)
root.resizable(0, 0)
Top = Frame(root, bd=1, relief=RIDGE)
Top.pack(side=TOP, fill=X)
Form = Frame(root, height=1)
Form.pack(side=TOP, pady=1)
lbl_title = Label(Top, text = "Shalom Clinic", font=('arial', 15))
lbl_title.pack(fill=X)
options=["Male","Female"]
options1=datesx
options2=["10:00:00","11:00:00","13:00:00"]
options3=["O+","O-","A+","A-","B+","B-","AB+","AB-"]
b = Application(root)
root.resizable(False, False)
root.mainloop()
| 31.20462
| 230
| 0.564886
| 1,271
| 9,455
| 4.160504
| 0.194335
| 0.04236
| 0.036309
| 0.03177
| 0.363275
| 0.261725
| 0.198185
| 0.110061
| 0.068079
| 0.068079
| 0
| 0.05374
| 0.28165
| 9,455
| 302
| 231
| 31.307947
| 0.724823
| 0.003067
| 0
| 0.078652
| 0
| 0.005618
| 0.143938
| 0.005701
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033708
| false
| 0
| 0.095506
| 0
| 0.134831
| 0.011236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a9f437ec901227c3a525ef2b2000464e450f945
| 3,399
|
py
|
Python
|
chia_tea/discord/commands/test_wallets.py
|
Tea-n-Tech/chia-tea
|
a5bd327b9d5e048e55e9f5d8cefca2dbcd5eae96
|
[
"BSD-3-Clause"
] | 6
|
2021-08-05T21:31:15.000Z
|
2021-11-15T20:54:25.000Z
|
chia_tea/discord/commands/test_wallets.py
|
Tea-n-Tech/chia-tea
|
a5bd327b9d5e048e55e9f5d8cefca2dbcd5eae96
|
[
"BSD-3-Clause"
] | 49
|
2021-08-05T19:33:08.000Z
|
2022-03-30T19:33:38.000Z
|
chia_tea/discord/commands/test_wallets.py
|
Tea-n-Tech/chia-tea
|
a5bd327b9d5e048e55e9f5d8cefca2dbcd5eae96
|
[
"BSD-3-Clause"
] | 1
|
2022-01-09T17:08:32.000Z
|
2022-01-09T17:08:32.000Z
|
import os
import tempfile
import unittest
from datetime import datetime
from google.protobuf.json_format import ParseDict
from ...monitoring.MonitoringDatabase import MonitoringDatabase
from ...protobuf.generated.computer_info_pb2 import ADD, UpdateEvent
from ...protobuf.generated.monitoring_service_pb2 import DataUpdateRequest
from ...utils.testing import async_test
from .wallets import wallets_cmd
class TestWalletsCmd(unittest.TestCase):
@async_test
async def test_no_wallets_case(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
db_filepath = os.path.join(tmpdir, "temp.db")
with MonitoringDatabase(db_filepath):
messages = await wallets_cmd(db_filepath)
self.assertEqual(len(messages), 1)
self.assertTrue(messages[0].startswith("No wallets"))
@async_test
async def test_not_running_wallet_not_displayed(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
db_filepath = os.path.join(tmpdir, "temp.db")
now_timestamp = datetime.now().timestamp()
with MonitoringDatabase(db_filepath) as db:
event = ParseDict(
js_dict=dict(
event_type=ADD,
wallet=dict(
is_running=False,
is_synced=True,
),
),
message=UpdateEvent(),
)
request = DataUpdateRequest(
machine_id=1,
machine_name="machine A",
timestamp=now_timestamp,
events=[event],
)
db.store_data_update_request(request)
messages = await wallets_cmd(db_filepath)
self.assertEqual(len(messages), 1)
self.assertTrue(messages[0].startswith("No wallets"))
@async_test
async def test_display_running_wallet(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
db_filepath = os.path.join(tmpdir, "tmp.db")
now_timestamp = datetime.now().timestamp()
with MonitoringDatabase(db_filepath) as db:
update_events = [
ParseDict(
js_dict=dict(
event_type=ADD,
wallet=dict(
is_running=True,
is_synced=True,
),
),
message=UpdateEvent(),
),
]
request = DataUpdateRequest(
machine_id=1,
machine_name="machine A",
timestamp=now_timestamp,
events=update_events,
)
db.store_data_update_request(request)
messages = await wallets_cmd(db_filepath)
print(messages)
# no failure
self.assertEqual(len(messages), 1)
msg = messages[0]
self.assertFalse(msg.startswith("Traceback"))
# display online harvester
self.assertTrue("machine A" in msg)
self.assertIn("synchronized", msg)
| 35.40625
| 74
| 0.527214
| 301
| 3,399
| 5.76412
| 0.292359
| 0.051873
| 0.024207
| 0.029395
| 0.619597
| 0.591931
| 0.591931
| 0.591931
| 0.591931
| 0.591931
| 0
| 0.004878
| 0.396881
| 3,399
| 95
| 75
| 35.778947
| 0.841463
| 0.010297
| 0
| 0.584416
| 0
| 0
| 0.026183
| 0
| 0
| 0
| 0
| 0
| 0.103896
| 1
| 0
| false
| 0
| 0.12987
| 0
| 0.142857
| 0.012987
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a9f5b949039e60cbeefb54542ccaa4f60417abd
| 990
|
py
|
Python
|
render/PC_Normalisation.py
|
sun-pyo/OcCo
|
e2e12dbaa8f9b98fb8c42fc32682f49e99be302f
|
[
"MIT"
] | 158
|
2020-08-19T18:13:28.000Z
|
2022-03-30T13:55:32.000Z
|
render/PC_Normalisation.py
|
sun-pyo/OcCo
|
e2e12dbaa8f9b98fb8c42fc32682f49e99be302f
|
[
"MIT"
] | 28
|
2020-05-30T04:02:33.000Z
|
2022-03-30T15:46:38.000Z
|
render/PC_Normalisation.py
|
sun-pyo/OcCo
|
e2e12dbaa8f9b98fb8c42fc32682f49e99be302f
|
[
"MIT"
] | 18
|
2020-08-19T19:52:38.000Z
|
2022-02-06T11:42:26.000Z
|
# Copyright (c) 2020. Hanchen Wang, [email protected]
import os, open3d, numpy as np
File_ = open('ModelNet_flist_short.txt', 'w')
if __name__ == "__main__":
root_dir = "../data/ModelNet_subset/"
for root, dirs, files in os.walk(root_dir, topdown=False):
for file in files:
if '.ply' in file:
amesh = open3d.io.read_triangle_mesh(os.path.join(root, file))
out_file_name = os.path.join(root, file).replace('.ply', '_normalised.obj')
center = amesh.get_center()
amesh.translate(-center)
maxR = (np.asarray(amesh.vertices)**2).sum(axis=1).max()**(1/2)
                # we found that dividing by (2*maxR) gives the best rendered visualisation results
amesh.scale(1/(2*maxR))
open3d.io.write_triangle_mesh(out_file_name, amesh)
File_.writelines(out_file_name.replace('.obj', '').replace(root_dir, '') + '\n')
print(out_file_name)
| 41.25
| 96
| 0.586869
| 131
| 990
| 4.206107
| 0.557252
| 0.050817
| 0.079855
| 0.050817
| 0.065336
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023578
| 0.271717
| 990
| 23
| 97
| 43.043478
| 0.740638
| 0.119192
| 0
| 0
| 0
| 0
| 0.099078
| 0.0553
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aa08817091e8a101312819073f37fbcd1819291
| 20,634
|
py
|
Python
|
pymatgen/apps/battery/insertion_battery.py
|
adozier/pymatgen
|
f1cc4d8db24ec11063be2fd84b4ea911f006eeb7
|
[
"MIT"
] | 18
|
2019-06-15T18:08:21.000Z
|
2022-01-30T05:01:29.000Z
|
ComRISB/pyextern/pymatgen/pymatgen/apps/battery/insertion_battery.py
|
comscope/Comsuite
|
b80ca9f34c519757d337487c489fb655f7598cc2
|
[
"BSD-3-Clause"
] | null | null | null |
ComRISB/pyextern/pymatgen/pymatgen/apps/battery/insertion_battery.py
|
comscope/Comsuite
|
b80ca9f34c519757d337487c489fb655f7598cc2
|
[
"BSD-3-Clause"
] | 11
|
2019-06-05T02:57:55.000Z
|
2021-12-29T02:54:25.000Z
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module is used for analysis of materials with potential application as
intercalation batteries.
"""
__author__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Anubhav Jain"
__email__ = "[email protected]"
__date__ = "Jan 13, 2012"
__status__ = "Beta"
import itertools
from pymatgen.core.composition import Composition
from pymatgen.core.units import Charge, Time
from pymatgen.phasediagram.maker import PhaseDiagram
from pymatgen.phasediagram.entries import PDEntry
from pymatgen.apps.battery.battery_abc import AbstractElectrode, \
AbstractVoltagePair
from pymatgen.core.periodic_table import Element
from scipy.constants import N_A
class InsertionElectrode(AbstractElectrode):
"""
A set of topotactically related compounds, with different amounts of a
single element, e.g. TiO2 and LiTiO2, that can be used to define an
insertion battery electrode.
"""
def __init__(self, entries, working_ion_entry):
"""
Create a new InsertionElectrode.
Args:
entries: A list of ComputedStructureEntries (or subclasses)
representing the different topotactic states of the battery,
e.g. TiO2 and LiTiO2.
working_ion_entry: A single ComputedEntry or PDEntry
representing the element that carries charge across the
battery, e.g. Li.
"""
self._entries = entries
self._working_ion = working_ion_entry.composition.elements[0]
self._working_ion_entry = working_ion_entry
#Prepare to make phase diagram: determine elements and set their energy
#to be very high
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
#Set an artificial energy for each element for convex hull generation
element_energy = max([entry.energy_per_atom for entry in entries]) + 10
pdentries = []
pdentries.extend(entries)
pdentries.extend([PDEntry(Composition({el:1}), element_energy)
for el in elements])
#Make phase diagram to determine which entries are stable vs. unstable
pd = PhaseDiagram(pdentries)
lifrac = lambda e: e.composition.get_atomic_fraction(self._working_ion)
#stable entries ordered by amount of Li asc
self._stable_entries = tuple(sorted([e for e in pd.stable_entries
if e in entries], key=lifrac))
#unstable entries ordered by amount of Li asc
self._unstable_entries = tuple(sorted([e for e in pd.unstable_entries
if e in entries], key=lifrac))
#create voltage pairs
self._vpairs = tuple([InsertionVoltagePair(self._stable_entries[i],
self._stable_entries[i + 1],
working_ion_entry)
for i in range(len(self._stable_entries) - 1)])
@property
def working_ion(self):
"""
The working ion as an Element object
"""
return self._working_ion
@property
def working_ion_entry(self):
return self._working_ion_entry
@property
def voltage_pairs(self):
return self._vpairs
def get_stable_entries(self, charge_to_discharge=True):
"""
Get the stable entries.
Args:
charge_to_discharge: order from most charge to most discharged
state? Default to True.
Returns:
A list of stable entries in the electrode, ordered by amount of the
working ion.
"""
list_copy = list(self._stable_entries)
return list_copy if charge_to_discharge else list_copy.reverse()
def get_unstable_entries(self, charge_to_discharge=True):
"""
Returns the unstable entries for the electrode.
Args:
charge_to_discharge: Order from most charge to most discharged
state? Defaults to True.
Returns:
A list of unstable entries in the electrode, ordered by amount of
the working ion.
"""
list_copy = list(self._unstable_entries)
return list_copy if charge_to_discharge else list_copy.reverse()
def get_all_entries(self, charge_to_discharge=True):
"""
Return all entries input for the electrode.
Args:
charge_to_discharge:
order from most charge to most discharged state? Defaults to
True.
Returns:
A list of all entries in the electrode (both stable and unstable),
ordered by amount of the working ion.
"""
all_entries = list(self.get_stable_entries())
all_entries.extend(self.get_unstable_entries())
#sort all entries by amount of working ion ASC
fsrt = lambda e: e.composition.get_atomic_fraction(self.working_ion)
all_entries = sorted([e for e in all_entries],
key=fsrt)
return all_entries if charge_to_discharge else all_entries.reverse()
@property
def fully_charged_entry(self):
"""
The most charged entry along the topotactic path.
"""
return self._stable_entries[0]
@property
def fully_discharged_entry(self):
"""
The most discharged entry along the topotactic path.
"""
return self._stable_entries[-1]
def get_max_instability(self, min_voltage=None, max_voltage=None):
"""
The maximum instability along a path for a specific voltage range.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Maximum decomposition energy of all compounds along the insertion
path (a subset of the path can be chosen by the optional arguments)
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.decomp_e_charge is not None:
data.append(pair.decomp_e_charge)
if pair.decomp_e_discharge is not None:
data.append(pair.decomp_e_discharge)
return max(data) if len(data) > 0 else None
def get_min_instability(self, min_voltage=None, max_voltage=None):
"""
The minimum instability along a path for a specific voltage range.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Minimum decomposition energy of all compounds along the insertion
path (a subset of the path can be chosen by the optional arguments)
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.decomp_e_charge is not None:
data.append(pair.decomp_e_charge)
if pair.decomp_e_discharge is not None:
data.append(pair.decomp_e_discharge)
return min(data) if len(data) > 0 else None
def get_max_muO2(self, min_voltage=None, max_voltage=None):
"""
Maximum critical oxygen chemical potential along path.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Maximum critical oxygen chemical of all compounds along the
insertion path (a subset of the path can be chosen by the optional
arguments).
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.muO2_discharge is not None:
                data.append(pair.muO2_discharge)
if pair.muO2_charge is not None:
data.append(pair.muO2_charge)
return max(data) if len(data) > 0 else None
def get_min_muO2(self, min_voltage=None, max_voltage=None):
"""
Minimum critical oxygen chemical potential along path.
Args:
min_voltage: The minimum allowable voltage for a given step
max_voltage: The maximum allowable voltage allowable for a given
step
Returns:
Minimum critical oxygen chemical of all compounds along the
insertion path (a subset of the path can be chosen by the optional
arguments).
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
            if pair.muO2_discharge is not None:
data.append(pair.pair.muO2_discharge)
if pair.muO2_charge is not None:
data.append(pair.muO2_charge)
return min(data) if len(data) > 0 else None
def get_sub_electrodes(self, adjacent_only=True, include_myself=True):
"""
If this electrode contains multiple voltage steps, then it is possible
to use only a subset of the voltage steps to define other electrodes.
For example, an LiTiO2 electrode might contain three subelectrodes:
[LiTiO2 --> TiO2, LiTiO2 --> Li0.5TiO2, Li0.5TiO2 --> TiO2]
This method can be used to return all the subelectrodes with some
options
Args:
adjacent_only: Only return electrodes from compounds that are
adjacent on the convex hull, i.e. no electrodes returned
will have multiple voltage steps if this is set True.
include_myself: Include this identical electrode in the list of
results.
Returns:
A list of InsertionElectrode objects
"""
battery_list = []
pair_it = self._vpairs if adjacent_only \
else itertools.combinations_with_replacement(self._vpairs, 2)
ion = self._working_ion
for pair in pair_it:
entry_charge = pair.entry_charge if adjacent_only \
else pair[0].entry_charge
entry_discharge = pair.entry_discharge if adjacent_only \
else pair[1].entry_discharge
chg_frac = entry_charge.composition.get_atomic_fraction(ion)
dischg_frac = entry_discharge.composition.get_atomic_fraction(ion)
def in_range(entry):
frac = entry.composition.get_atomic_fraction(ion)
return chg_frac <= frac <= dischg_frac
if include_myself or entry_charge != self.fully_charged_entry \
or entry_discharge != self.fully_discharged_entry:
unstable_entries = filter(in_range,
self.get_unstable_entries())
stable_entries = filter(in_range, self.get_stable_entries())
all_entries = list(stable_entries)
all_entries.extend(unstable_entries)
battery_list.append(self.__class__(all_entries,
self.working_ion_entry))
return battery_list
def as_dict_summary(self, print_subelectrodes=True):
"""
Generate a summary dict.
Args:
print_subelectrodes: Also print data on all the possible
subelectrodes.
Returns:
            A summary of this electrode's properties in dict format.
"""
chg_comp = self.fully_charged_entry.composition
dischg_comp = self.fully_discharged_entry.composition
ion = self.working_ion
d = {"average_voltage": self.get_average_voltage(),
"max_voltage": self.max_voltage,
"min_voltage": self.min_voltage,
"max_delta_volume": self.max_delta_volume,
"max_voltage_step": self.max_voltage_step,
"capacity_grav": self.get_capacity_grav(),
"capacity_vol": self.get_capacity_vol(),
"energy_grav": self.get_specific_energy(),
"energy_vol": self.get_energy_density(),
"working_ion": self._working_ion.symbol,
"nsteps": self.num_steps,
"framework": self._vpairs[0].framework.to_data_dict,
"formula_charge": chg_comp.reduced_formula,
"formula_discharge": dischg_comp.reduced_formula,
"fracA_charge": chg_comp.get_atomic_fraction(ion),
"fracA_discharge": dischg_comp.get_atomic_fraction(ion),
"max_instability": self.get_max_instability(),
"min_instability": self.get_min_instability()}
if print_subelectrodes:
f_dict = lambda c: c.as_dict_summary(print_subelectrodes=False)
d["adj_pairs"] = map(f_dict,
self.get_sub_electrodes(adjacent_only=True))
d["all_pairs"] = map(f_dict,
self.get_sub_electrodes(adjacent_only=False))
return d
def __str__(self):
return self.__repr__()
def __repr__(self):
output = []
chg_form = self.fully_charged_entry.composition.reduced_formula
dischg_form = self.fully_discharged_entry.composition.reduced_formula
output.append("InsertionElectrode with endpoints at {} and {}".format(
chg_form, dischg_form))
output.append("Avg. volt. = {} V".format(self.get_average_voltage()))
output.append("Grav. cap. = {} mAh/g".format(self.get_capacity_grav()))
output.append("Vol. cap. = {}".format(self.get_capacity_vol()))
return "\n".join(output)
@classmethod
def from_dict(cls, d):
from monty.json import MontyDecoder
dec = MontyDecoder()
return cls(dec.process_decoded(d["entries"]),
dec.process_decoded(d["working_ion_entry"]))
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entries": [entry.as_dict() for entry in self._entries],
"working_ion_entry": self.working_ion_entry.as_dict()}
class InsertionVoltagePair(AbstractVoltagePair):
"""
Defines an Insertion Voltage Pair.
Args:
entry1: Entry corresponding to one of the entries in the voltage step.
entry2: Entry corresponding to the other entry in the voltage step.
working_ion_entry: A single ComputedEntry or PDEntry representing
the element that carries charge across the battery, e.g. Li.
"""
def __init__(self, entry1, entry2, working_ion_entry):
#initialize some internal variables
working_element = working_ion_entry.composition.elements[0]
entry_charge = entry1
entry_discharge = entry2
if entry_charge.composition.get_atomic_fraction(working_element) \
> entry2.composition.get_atomic_fraction(working_element):
(entry_charge, entry_discharge) = (entry_discharge, entry_charge)
comp_charge = entry_charge.composition
comp_discharge = entry_discharge.composition
ion_sym = working_element.symbol
frame_charge_comp = Composition({el: comp_charge[el]
for el in comp_charge
if el.symbol != ion_sym})
frame_discharge_comp = Composition({el: comp_discharge[el]
for el in comp_discharge
if el.symbol != ion_sym})
#Data validation
#check that the ion is just a single element
if not working_ion_entry.composition.is_element:
raise ValueError("VoltagePair: The working ion specified must be "
"an element")
#check that at least one of the entries contains the working element
if not comp_charge.get_atomic_fraction(working_element) > 0 and \
not comp_discharge.get_atomic_fraction(working_element) > 0:
raise ValueError("VoltagePair: The working ion must be present in "
"one of the entries")
        #check that the entries do not contain the same amount of the working
#element
if comp_charge.get_atomic_fraction(working_element) == \
comp_discharge.get_atomic_fraction(working_element):
raise ValueError("VoltagePair: The working ion atomic percentage "
"cannot be the same in both the entries")
#check that the frameworks of the entries are equivalent
if not frame_charge_comp.reduced_formula == \
frame_discharge_comp.reduced_formula:
raise ValueError("VoltagePair: the specified entries must have the"
" same compositional framework")
#Initialize normalization factors, charged and discharged entries
valence_list = Element(ion_sym).oxidation_states
working_ion_valence = max(valence_list)
(self.framework,
norm_charge) = frame_charge_comp.get_reduced_composition_and_factor()
norm_discharge = \
frame_discharge_comp.get_reduced_composition_and_factor()[1]
self._working_ion_entry = working_ion_entry
#Initialize normalized properties
self._vol_charge = entry_charge.structure.volume / norm_charge
self._vol_discharge = entry_discharge.structure.volume / norm_discharge
comp_charge = entry_charge.composition
comp_discharge = entry_discharge.composition
self._mass_charge = comp_charge.weight / norm_charge
self._mass_discharge = comp_discharge.weight / norm_discharge
self._num_ions_transferred = \
(comp_discharge[working_element] / norm_discharge) \
- (comp_charge[working_element] / norm_charge)
self._voltage = \
(((entry_charge.energy / norm_charge) -
(entry_discharge.energy / norm_discharge)) / \
self._num_ions_transferred + working_ion_entry.energy_per_atom) / working_ion_valence
self._mAh = self._num_ions_transferred * Charge(1, "e").to("C") * \
Time(1, "s").to("h") * N_A * 1000 * working_ion_valence
#Step 4: add (optional) hull and muO2 data
self.decomp_e_charge = \
entry_charge.data.get("decomposition_energy", None)
self.decomp_e_discharge = \
entry_discharge.data.get("decomposition_energy", None)
self.muO2_charge = entry_charge.data.get("muO2", None)
self.muO2_discharge = entry_discharge.data.get("muO2", None)
self.entry_charge = entry_charge
self.entry_discharge = entry_discharge
self.normalization_charge = norm_charge
self.normalization_discharge = norm_discharge
self._frac_charge = comp_charge.get_atomic_fraction(working_element)
self._frac_discharge = \
comp_discharge.get_atomic_fraction(working_element)
@property
def frac_charge(self):
return self._frac_charge
@property
def frac_discharge(self):
return self._frac_discharge
@property
def voltage(self):
return self._voltage
@property
def mAh(self):
return self._mAh
@property
def mass_charge(self):
return self._mass_charge
@property
def mass_discharge(self):
return self._mass_discharge
@property
def vol_charge(self):
return self._vol_charge
@property
def vol_discharge(self):
return self._vol_discharge
@property
def working_ion_entry(self):
return self._working_ion_entry
def __repr__(self):
output = ["Insertion voltage pair with working ion {}"
.format(self._working_ion_entry.composition.reduced_formula),
"V = {}, mAh = {}".format(self.voltage, self.mAh),
"mass_charge = {}, mass_discharge = {}"
.format(self.mass_charge, self.mass_discharge),
"vol_charge = {}, vol_discharge = {}"
.format(self.vol_charge, self.vol_discharge),
"frac_charge = {}, frac_discharge = {}"
.format(self.frac_charge, self.frac_discharge)]
return "\n".join(output)
def __str__(self):
return self.__repr__()
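# Illustrative usage sketch (added for clarity; not part of the original module;
# the entry objects below are hypothetical ComputedEntry/ComputedStructureEntry instances):
#   electrode = InsertionElectrode(entries=[tio2_entry, li_tio2_entry],
#                                  working_ion_entry=li_entry)
#   electrode.get_average_voltage()   # average voltage over all steps, in V
#   electrode.as_dict_summary()       # dict with capacities, energy densities, etc.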
| 38.932075
| 97
| 0.630804
| 2,419
| 20,634
| 5.11575
| 0.144274
| 0.034747
| 0.026667
| 0.008404
| 0.420525
| 0.366869
| 0.305616
| 0.265697
| 0.250182
| 0.234667
| 0
| 0.00532
| 0.298585
| 20,634
| 529
| 98
| 39.005671
| 0.84972
| 0.235146
| 0
| 0.225352
| 0
| 0
| 0.06946
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112676
| false
| 0
| 0.035211
| 0.049296
| 0.260563
| 0.010563
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aa0ecacfe2573f92054f8caab4ad37415452b90
| 7,202
|
py
|
Python
|
python/GafferUI/ColorSwatchPlugValueWidget.py
|
ddesmond/gaffer
|
4f25df88103b7893df75865ea919fb035f92bac0
|
[
"BSD-3-Clause"
] | 561
|
2016-10-18T04:30:48.000Z
|
2022-03-30T06:52:04.000Z
|
python/GafferUI/ColorSwatchPlugValueWidget.py
|
ddesmond/gaffer
|
4f25df88103b7893df75865ea919fb035f92bac0
|
[
"BSD-3-Clause"
] | 1,828
|
2016-10-14T19:01:46.000Z
|
2022-03-30T16:07:19.000Z
|
python/GafferUI/ColorSwatchPlugValueWidget.py
|
ddesmond/gaffer
|
4f25df88103b7893df75865ea919fb035f92bac0
|
[
"BSD-3-Clause"
] | 120
|
2016-10-18T15:19:13.000Z
|
2021-12-20T16:28:23.000Z
|
##########################################################################
#
# Copyright (c) 2013, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import weakref
import imath
import Gaffer
import GafferUI
class ColorSwatchPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plugs, **kw ) :
self.__swatch = GafferUI.ColorSwatch()
GafferUI.PlugValueWidget.__init__( self, self.__swatch, plugs, **kw )
## \todo How do set maximum height with a public API?
self.__swatch._qtWidget().setMaximumHeight( 20 )
self._addPopupMenu( self.__swatch )
self.__swatch.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ), scoped = False )
self.__swatch.dragBeginSignal().connect( Gaffer.WeakMethod( self.__dragBegin ), scoped = False )
self.__swatch.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ), scoped = False )
self.__swatch.buttonReleaseSignal().connect( Gaffer.WeakMethod( self.__buttonRelease ), scoped = False )
self._updateFromPlugs()
def setHighlighted( self, highlighted ) :
GafferUI.PlugValueWidget.setHighlighted( self, highlighted )
self.__swatch.setHighlighted( highlighted )
def _updateFromPlugs( self ) :
with self.getContext() :
value = _colorFromPlugs( self.getPlugs() )
self.__swatch.setColor( value )
def __buttonPress( self, widget, event ) :
if event.buttons == event.Buttons.Left :
return True
return False
def __dragBegin( self, widget, event ) :
GafferUI.Pointer.setCurrent( "rgba" )
return self.__swatch.getColor()
def __dragEnd( self, widget, event ) :
GafferUI.Pointer.setCurrent( None )
def __buttonRelease( self, widget, event ) :
if event.button != event.Buttons.Left :
return False
if not self._editable() :
return False
_ColorPlugValueDialogue.acquire( self.getPlugs() )
return True
def _colorFromPlugs( plugs ) :
if not len( plugs ) :
return imath.Color4f( 0 )
# ColorSwatch only supports one colour, and doesn't have
# an "indeterminate" state, so when we have multiple plugs
# the best we can do is take an average.
return sum( p.getValue() for p in plugs ) / len( plugs )
## \todo Perhaps we could make this a part of the public API? Perhaps we could also make a
# PlugValueDialogue base class to share some of the work with the dialogue made by the
# SplinePlugValueWidget. Or perhaps the `acquire()` here and `NodeSetEditor.acquire()` should
# actually be functionality of CompoundEditor?
class _ColorPlugValueDialogue( GafferUI.ColorChooserDialogue ) :
def __init__( self, plugs, parentWindow ) :
GafferUI.ColorChooserDialogue.__init__(
self,
color = _colorFromPlugs( plugs )
)
# we use these to decide which actions to merge into a single undo
self.__lastChangedReason = None
self.__mergeGroupId = 0
self.__colorChangedConnection = self.colorChooser().colorChangedSignal().connect( Gaffer.WeakMethod( self.__colorChanged ), scoped = False )
self.confirmButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
self.cancelButton.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
self.__plugs = plugs
self.__initialValues = { p : p.getValue() for p in self.__plugs }
nodes = { p.node() for p in self.__plugs }
self.__plugSetConnections = [ n.plugSetSignal().connect( Gaffer.WeakMethod( self.__plugSet ), scoped = False ) for n in nodes ]
for node in nodes :
node.parentChangedSignal().connect( Gaffer.WeakMethod( self.__destroy ), scoped = False )
plug = next( iter( self.__plugs ) )
if len( self.__plugs ) == 1 :
self.setTitle( plug.relativeName( plug.ancestor( Gaffer.ScriptNode ) ) )
else :
self.setTitle( "{} plugs".format( len( self.__plugs ) ) )
self.__plugSet( plug )
parentWindow.addChildWindow( self, removeOnClose = True )
@classmethod
def acquire( cls, plugs ) :
plug = next( iter( plugs ) )
script = plug.node().scriptNode()
scriptWindow = GafferUI.ScriptWindow.acquire( script )
for window in scriptWindow.childWindows() :
if isinstance( window, cls ) and window.__plugs == plugs :
window.setVisible( True )
return window
window = _ColorPlugValueDialogue( plugs, scriptWindow )
window.setVisible( True )
		return window
def __plugSet( self, plug ) :
if plug in self.__plugs :
with Gaffer.BlockedConnection( self.__colorChangedConnection ) :
self.colorChooser().setColor( _colorFromPlugs( self.__plugs ) )
def __colorChanged( self, colorChooser, reason ) :
if not GafferUI.ColorChooser.changesShouldBeMerged( self.__lastChangedReason, reason ) :
self.__mergeGroupId += 1
self.__lastChangedReason = reason
with Gaffer.UndoScope(
next( iter( self.__plugs ) ).ancestor( Gaffer.ScriptNode ),
mergeGroup = "ColorPlugValueDialogue%d%d" % ( id( self, ), self.__mergeGroupId )
) :
with Gaffer.BlockedConnection( self.__plugSetConnections ) :
for plug in self.__plugs :
plug.setValue( self.colorChooser().getColor() )
def __buttonClicked( self, button ) :
if button is self.cancelButton :
with Gaffer.UndoScope( next( iter( self.__plugs ) ).ancestor( Gaffer.ScriptNode ) ) :
for p, v in self.__initialValues.items() :
p.setValue( v )
self.parent().removeChild( self )
# Workaround for https://bugreports.qt-project.org/browse/QTBUG-26761.
assert( not self.visible() )
GafferUI.WidgetAlgo.keepUntilIdle( self )
def __destroy( self, *unused ) :
self.parent().removeChild( self )
| 34.625
| 142
| 0.71494
| 836
| 7,202
| 5.998804
| 0.366029
| 0.02333
| 0.041276
| 0.048455
| 0.12323
| 0.094118
| 0.078166
| 0.078166
| 0.078166
| 0.051047
| 0
| 0.003362
| 0.173979
| 7,202
| 207
| 143
| 34.792271
| 0.839637
| 0.317273
| 0
| 0.097087
| 0
| 0
| 0.008049
| 0.005507
| 0
| 0
| 0
| 0.004831
| 0.009709
| 1
| 0.135922
| false
| 0
| 0.038835
| 0
| 0.291262
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aa3ebc9e71e06ddfc092d3a9a924b404661453e
| 2,019
|
py
|
Python
|
osh/cmd_exec_test.py
|
rhencke/oil
|
c40004544e47ee78cde1fcb22c672162b8eb2cd2
|
[
"Apache-2.0"
] | 1
|
2019-01-25T01:15:51.000Z
|
2019-01-25T01:15:51.000Z
|
osh/cmd_exec_test.py
|
rhencke/oil
|
c40004544e47ee78cde1fcb22c672162b8eb2cd2
|
[
"Apache-2.0"
] | null | null | null |
osh/cmd_exec_test.py
|
rhencke/oil
|
c40004544e47ee78cde1fcb22c672162b8eb2cd2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2016 Andy Chu. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
cmd_exec_test.py: Tests for cmd_exec.py
"""
import unittest
from core import test_lib
from core.meta import syntax_asdl, Id
from osh import state
suffix_op = syntax_asdl.suffix_op
osh_word = syntax_asdl.word
word_part = syntax_asdl.word_part
def InitEvaluator():
word_ev = test_lib.MakeTestEvaluator()
state.SetLocalString(word_ev.mem, 'x', 'xxx')
state.SetLocalString(word_ev.mem, 'y', 'yyy')
return word_ev
class ExpansionTest(unittest.TestCase):
def testBraceExpand(self):
arena = test_lib.MakeArena('<cmd_exec_test.py>')
c_parser = test_lib.InitCommandParser('echo _{a,b}_', arena=arena)
node = c_parser._ParseCommandLine()
print(node)
ex = test_lib.InitExecutor(arena=arena)
#print(ex.Execute(node))
#print(ex._ExpandWords(node.words))
class VarOpTest(unittest.TestCase):
def testVarOps(self):
ev = InitEvaluator() # initializes x=xxx and y=yyy
unset_sub = word_part.BracedVarSub(syntax_asdl.token(Id.VSub_Name, 'unset'))
part_vals = []
ev._EvalWordPart(unset_sub, part_vals)
print(part_vals)
set_sub = word_part.BracedVarSub(syntax_asdl.token(Id.VSub_Name, 'x'))
part_vals = []
ev._EvalWordPart(set_sub, part_vals)
print(part_vals)
# Now add some ops
part = word_part.LiteralPart(syntax_asdl.token(Id.Lit_Chars, 'default'))
arg_word = osh_word.CompoundWord([part])
test_op = suffix_op.StringUnary(Id.VTest_ColonHyphen, arg_word)
unset_sub.suffix_op = test_op
set_sub.suffix_op = test_op
part_vals = []
ev._EvalWordPart(unset_sub, part_vals)
print(part_vals)
part_vals = []
ev._EvalWordPart(set_sub, part_vals)
print(part_vals)
if __name__ == '__main__':
unittest.main()
| 26.92
| 80
| 0.722635
| 295
| 2,019
| 4.674576
| 0.4
| 0.069616
| 0.029007
| 0.063814
| 0.279913
| 0.214648
| 0.214648
| 0.214648
| 0.214648
| 0.214648
| 0
| 0.004731
| 0.162457
| 2,019
| 74
| 81
| 27.283784
| 0.810763
| 0.209014
| 0
| 0.27907
| 0
| 0
| 0.037365
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.093023
| 0
| 0.232558
| 0.116279
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aa50be39d8821cc01c657b693bc988aa6fe4578
| 5,265
|
py
|
Python
|
temp/discrete_a2c_agent.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | null | null | null |
temp/discrete_a2c_agent.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | null | null | null |
temp/discrete_a2c_agent.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | 1
|
2021-11-23T12:30:37.000Z
|
2021-11-23T12:30:37.000Z
|
import numpy as np
import torch
import torch.nn.functional as F
from codes.d_agents.a0_base_agent import float32_preprocessor
from codes.d_agents.on_policy.on_policy_agent import OnPolicyAgent
from codes.e_utils import rl_utils, replay_buffer
from codes.d_agents.actions import ProbabilityActionSelector
from codes.e_utils.names import DeepLearningModelName, AgentMode
class AgentDiscreteA2C(OnPolicyAgent):
"""
"""
def __init__(
self, worker_id, input_shape, num_outputs,
train_action_selector, test_and_play_action_selector, params, device
):
assert isinstance(train_action_selector, ProbabilityActionSelector)
assert isinstance(test_and_play_action_selector, ProbabilityActionSelector)
assert params.DEEP_LEARNING_MODEL in [
DeepLearningModelName.DISCRETE_STOCHASTIC_ACTOR_CRITIC_MLP,
DeepLearningModelName.DISCRETE_STOCHASTIC_ACTOR_CRITIC_CNN
]
super(AgentDiscreteA2C, self).__init__(train_action_selector, test_and_play_action_selector, params, device)
self.__name__ = "AgentDiscreteA2C"
self.worker_id = worker_id
self.model = rl_utils.get_rl_model(
worker_id=worker_id, input_shape=input_shape, num_outputs=num_outputs, params=params, device=self.device
)
if self.params.DEEP_LEARNING_MODEL == DeepLearningModelName.DISCRETE_STOCHASTIC_ACTOR_CRITIC_MLP:
self.actor_optimizer = rl_utils.get_optimizer(
parameters=self.model.base.actor.parameters(),
learning_rate=self.params.ACTOR_LEARNING_RATE,
params=params
)
self.critic_optimizer = rl_utils.get_optimizer(
parameters=self.model.base.critic.parameters(),
learning_rate=self.params.LEARNING_RATE,
params=params
)
elif self.params.DEEP_LEARNING_MODEL == DeepLearningModelName.DISCRETE_STOCHASTIC_ACTOR_CRITIC_CNN:
self.optimizer = rl_utils.get_optimizer(
parameters=list(self.model.base.common_conv.parameters()) + list(self.model.base.critic_fc.parameters()),
learning_rate=self.params.LEARNING_RATE,
params=params
)
else:
raise ValueError()
self.buffer = replay_buffer.ExperienceReplayBuffer(
experience_source=None, buffer_size=self.params.BATCH_SIZE
)
def __call__(self, states, critics=None):
if not isinstance(states, torch.FloatTensor):
states = float32_preprocessor(states).to(self.device)
logits_v = self.model.base.forward_actor(states)
probs_v = F.softmax(logits_v, dim=1)
probs = probs_v.data.cpu().numpy()
if self.agent_mode == AgentMode.TRAIN:
actions = np.array(self.train_action_selector(probs))
else:
actions = np.array(self.test_and_play_action_selector(probs))
critics = torch.zeros(size=probs_v.size())
return actions, critics
def train(self, step_idx):
        # When training on a batch collected in a lucky episode versus one collected in an unlucky episode,
        # the NN parameters are repeatedly swept in different directions --> the variance of the gradients is very large
batch = self.buffer.sample(batch_size=None)
# states_v.shape: (32, 3)
# actions_v.shape: (32, 1)
# target_action_values_v.shape: (32,)
states_v, actions_v, target_action_values_v = self.unpack_batch_for_actor_critic(
batch=batch, target_model=self.model, params=self.params
)
logits_v, value_v = self.model(states_v)
# Critic Optimization
self.critic_optimizer.zero_grad()
loss_critic_v = F.mse_loss(input=value_v.squeeze(-1), target=target_action_values_v)
loss_critic_v.backward(retain_graph=True)
#nn_utils.clip_grad_norm_(self.model.base.critic.parameters(), self.params.CLIP_GRAD)
self.critic_optimizer.step()
# Actor Optimization
self.actor_optimizer.zero_grad()
# advantage_v.shape: (32,)
advantage_v = target_action_values_v - value_v.squeeze(-1).detach()
log_pi_v = F.log_softmax(logits_v, dim=1)
log_pi_action_v = log_pi_v.gather(dim=1, index=actions_v.unsqueeze(-1)).squeeze(-1)
reinforced_log_pi_action_v = advantage_v * log_pi_action_v
#print(actions_v.size(), advantage_v.size(), log_pi_v.size(), log_pi_action_v.size(), reinforced_log_pi_action_v.size())
loss_actor_v = -1.0 * reinforced_log_pi_action_v.mean()
prob_v = F.softmax(logits_v, dim=1)
entropy_v = -1.0 * (prob_v * log_pi_v).sum(dim=1).mean()
loss_entropy_v = -1.0 * self.params.ENTROPY_LOSS_WEIGHT * entropy_v
# driving loss_actor_v down --> pushes log_pi_v.mean() up
# driving loss_entropy_v down --> pushes entropy_v up
loss_actor_and_entropy_v = loss_actor_v + loss_entropy_v
loss_actor_and_entropy_v.backward()
#nn_utils.clip_grad_norm_(self.model.base.actor.parameters(), self.params.CLIP_GRAD)
self.actor_optimizer.step()
gradients = self.model.get_gradients_for_current_parameters()
return gradients, loss_critic_v.item(), loss_actor_v.item() * -1.0
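A minimal, self-contained sketch of the same actor-critic loss arithmetic used in train() above, with made-up tensors in place of the agent's model and replay batch (the batch size, action count and 0.01 entropy weight are illustrative assumptions, not values taken from the agent):
import torch
import torch.nn.functional as F

logits = torch.randn(32, 4, requires_grad=True)     # hypothetical policy logits (batch=32, 4 actions)
values = torch.randn(32, requires_grad=True)        # hypothetical critic estimates V(s)
targets = torch.randn(32)                           # hypothetical target action values
actions = torch.randint(low=0, high=4, size=(32,))  # hypothetical sampled actions

loss_critic = F.mse_loss(values, targets)                         # critic regression loss
advantage = targets - values.detach()                             # baseline is detached, as above
log_pi = F.log_softmax(logits, dim=1)
log_pi_action = log_pi.gather(dim=1, index=actions.unsqueeze(-1)).squeeze(-1)
loss_actor = -(advantage * log_pi_action).mean()                  # policy-gradient term
entropy = -(F.softmax(logits, dim=1) * log_pi).sum(dim=1).mean()
(loss_actor - 0.01 * entropy + loss_critic).backward()            # single combined backward pass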
| 41.785714
| 128
| 0.688319
| 682
| 5,265
| 4.957478
| 0.247801
| 0.029281
| 0.026915
| 0.021295
| 0.347826
| 0.236025
| 0.17273
| 0.160899
| 0.14197
| 0.078675
| 0
| 0.008551
| 0.222602
| 5,265
| 126
| 129
| 41.785714
| 0.817493
| 0.129725
| 0
| 0.084337
| 0
| 0
| 0.00351
| 0
| 0
| 0
| 0
| 0
| 0.036145
| 1
| 0.036145
| false
| 0
| 0.096386
| 0
| 0.168675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aa6e5ef18ddd1cd84d84ba40a68b3ca12d3ecf7
| 789
|
py
|
Python
|
apps/core/forms.py
|
allexvissoci/djangoecommerce
|
645c05daa5f13c1e42184a7c6f534b9c260d280a
|
[
"CC0-1.0"
] | null | null | null |
apps/core/forms.py
|
allexvissoci/djangoecommerce
|
645c05daa5f13c1e42184a7c6f534b9c260d280a
|
[
"CC0-1.0"
] | null | null | null |
apps/core/forms.py
|
allexvissoci/djangoecommerce
|
645c05daa5f13c1e42184a7c6f534b9c260d280a
|
[
"CC0-1.0"
] | null | null | null |
from django import forms
from django.core.mail import send_mail
from django.conf import settings
class ContactForm(forms.Form):
name = forms.CharField(label='Nome', required=True)
email = forms.EmailField(label='E-mail')
message = forms.CharField(label='Mensagem', widget=forms.Textarea(),
required=True)
def send_mail(self):
name = self.cleaned_data['name']
email = self.cleaned_data['email']
message = self.cleaned_data['message']
message = 'Nome: {0}\nE-mail:{1}\n{2}'.format(name, email, message)
send_mail(
'Contato Django E-commerce',
message,
settings.DEFAULT_FROM_EMAIL,
[settings.DEFAULT_FROM_EMAIL]
)
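A short usage sketch for the form above (assumes a configured Django settings module with working mail settings; the field values are placeholders):
form = ContactForm(data={
    'name': 'Jane Doe',
    'email': 'jane@example.com',
    'message': 'Hello there',
})
if form.is_valid():      # populates form.cleaned_data
    form.send_mail()     # formats the body and delivers it via django.core.mail.send_mail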
| 32.875
| 75
| 0.595691
| 89
| 789
| 5.168539
| 0.426966
| 0.065217
| 0.097826
| 0.104348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005357
| 0.290241
| 789
| 23
| 76
| 34.304348
| 0.816071
| 0
| 0
| 0
| 0
| 0
| 0.107731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aa6ee42edcf06446ba2b86d62dbe6a27542ef2e
| 5,475
|
py
|
Python
|
Fchat/Gui/AddFriendWidget.py
|
jamesaxl/FreeSnake
|
3cef45165bce50d0f296e0d016b49d45aa31a653
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2018-11-15T22:55:01.000Z
|
2020-01-01T21:21:07.000Z
|
Fchat/Gui/AddFriendWidget.py
|
jamesaxl/FreeSnake
|
3cef45165bce50d0f296e0d016b49d45aa31a653
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2019-11-10T20:31:29.000Z
|
2021-07-31T18:24:47.000Z
|
Fchat/Gui/AddFriendWidget.py
|
jamesaxl/FreeSnake
|
3cef45165bce50d0f296e0d016b49d45aa31a653
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2018-11-15T22:55:17.000Z
|
2018-11-15T22:55:17.000Z
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gio, Gtk, Gdk
class AddFriendWidget(Gtk.Box):
def __init__(self, main_window, fchat_prv, friend_list):
Gtk.Box.__init__(self, spacing=7, orientation = Gtk.Orientation.VERTICAL)
self.fchat_prv = fchat_prv
self.main_window = main_window
self.friend_list = friend_list
self.fchat_prv.add_friend_gui = self
self.generate_keys_bt = Gtk.Button('Generate Key')
self.generate_keys_bt.connect('clicked', self.on_generate_keys)
self.save_bt = Gtk.Button('Save')
self.save_bt.connect('clicked', self.on_save)
self.cancel_bt = Gtk.Button('Cancel')
self.cancel_bt.connect('clicked', self.on_cancel)
self.close_bt = Gtk.Button('Close')
self.close_bt.connect('clicked', self.on_close)
self.owner_info = Gtk.Entry()
self.owner_info.set_sensitive(False)
self.copy_clipboard_bt = Gtk.Button(label='Copy to clipboard')
self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self.copy_clipboard_bt.connect('clicked', self.on_copy_clipboard)
h_owner = Gtk.Box(spacing=5)
h_owner.pack_start(self.owner_info, True, True, 1)
h_owner.pack_start(self.copy_clipboard_bt, False, False, 1)
self.friend_info = Gtk.Entry()
self.friend_info.set_placeholder_text('Key of your friend')
self.spinner = Gtk.Spinner()
self.pack_start(h_owner, True, False, 7)
self.pack_start(self.friend_info, True, False, 7)
self.pack_start(self.spinner, True, False, 7)
h_bt = Gtk.Box()
h_bt.pack_start(self.generate_keys_bt, True, False, 7)
h_bt.pack_start(self.save_bt, True, False, 7)
h_bt.pack_start(self.cancel_bt, True, False, 7)
h_bt.pack_start(self.close_bt, True, False, 7)
self.pack_start(h_bt, True, False, 7)
self.job = None
def on_generate_keys(self, button):
self.pub, self.prv, self.pub_info_key, self.job = self.fchat_prv.generate_key_for_friend()
self.owner_info.set_text(self.pub_info_key)
self.on_generate_keys_start()
def on_generate_keys_start(self):
self.spinner.show()
self.spinner.start()
self.friend_info.set_sensitive(False)
self.save_bt.set_sensitive(False)
self.close_bt.set_sensitive(False)
self.generate_keys_bt.set_sensitive(False)
self.copy_clipboard_bt.set_sensitive(False)
def on_generate_keys_ok(self):
self.spinner.hide()
self.spinner.stop()
self.friend_info.set_sensitive(True)
self.save_bt.set_sensitive(True)
self.close_bt.set_sensitive(True)
self.generate_keys_bt.set_sensitive(True)
self.copy_clipboard_bt.set_sensitive(True)
def on_generate_keys_faild(self, text):
self.spinner.hide()
self.spinner.stop()
self.friend_info.set_sensitive(True)
self.save_bt.set_sensitive(True)
self.close_bt.set_sensitive(True)
self.generate_keys_bt.set_sensitive(True)
self.copy_clipboard_bt.set_sensitive(True)
def on_cancel(self, button):
if self.job:
self.job.remove_from_queue_when_finish()
def on_close(self, button):
self.main_window.application.back_main_window_or_friend_list()
def on_save(self, button):
if self.owner_info.get_text() == '':
self.msg_info('You should generate a key that contains your info')
return
if self.friend_info.get_text() == '':
self.msg_info('Friend info is required')
return
self.fchat_prv.add_friend(self.pub, self.prv, self.friend_info.get_text())
self.on_save_start()
def on_save_start(self):
self.spinner.show()
self.spinner.start()
self.friend_info.set_sensitive(False)
self.save_bt.set_sensitive(False)
self.close_bt.set_sensitive(False)
self.generate_keys_bt.set_sensitive(False)
self.copy_clipboard_bt.set_sensitive(False)
def on_save_start_ok(self):
self.spinner.hide()
self.spinner.stop()
self.friend_info.set_sensitive(True)
self.save_bt.set_sensitive(True)
self.close_bt.set_sensitive(True)
self.generate_keys_bt.set_sensitive(True)
self.copy_clipboard_bt.set_sensitive(True)
self.friend_list.sync_friends_list()
def on_save_start_duplicate(self, text):
self.msg_info(text)
def on_save_start_faild(self):
dialog = Gtk.MessageDialog(self.main_window, 0, Gtk.MessageType.ERROR, Gtk.ButtonsType.OK, "ERROR")
dialog.format_secondary_text("Error adding friend please try later")
dialog.run()
dialog.destroy()
self.spinner.hide()
self.spinner.stop()
self.friend_info.set_sensitive(True)
self.save_bt.set_sensitive(True)
self.close_bt.set_sensitive(True)
self.generate_keys_bt.set_sensitive(True)
self.copy_clipboard_bt.set_sensitive(True)
def on_copy_clipboard(self, button):
self.clipboard.set_text(self.owner_info.get_text(), -1)
def msg_info(self, text):
dialog = Gtk.MessageDialog(self.main_window, 0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, "Info")
dialog.format_secondary_text(text)
dialog.run()
dialog.destroy()
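A hedged sketch of mounting the widget in a bare window for manual testing; _StubPrv is a hypothetical stand-in for the real fchat provider, and the other collaborators normally come from the surrounding FreeSnake application:
class _StubPrv:
    pass  # stand-in object so AddFriendWidget can set add_friend_gui on it

if __name__ == '__main__':
    win = Gtk.Window(title='Add friend')
    win.add(AddFriendWidget(main_window=win, fchat_prv=_StubPrv(), friend_list=None))
    win.connect('destroy', Gtk.main_quit)
    win.show_all()
    Gtk.main()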
| 35.784314
| 107
| 0.667032
| 762
| 5,475
| 4.494751
| 0.137795
| 0.108613
| 0.098102
| 0.09927
| 0.547445
| 0.448759
| 0.418978
| 0.392701
| 0.392701
| 0.338394
| 0
| 0.004013
| 0.226301
| 5,475
| 152
| 108
| 36.019737
| 0.804533
| 0
| 0
| 0.403361
| 0
| 0
| 0.040183
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.016807
| 0
| 0.159664
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aa7a0ee45227c9db1cfad5be465b9e3f1596fbf
| 533
|
py
|
Python
|
python01/game.py
|
liyan2013/hogwarts
|
4b81d968b049a13cb2aa293d32c034ca3a30ee79
|
[
"Apache-2.0"
] | null | null | null |
python01/game.py
|
liyan2013/hogwarts
|
4b81d968b049a13cb2aa293d32c034ca3a30ee79
|
[
"Apache-2.0"
] | null | null | null |
python01/game.py
|
liyan2013/hogwarts
|
4b81d968b049a13cb2aa293d32c034ca3a30ee79
|
[
"Apache-2.0"
] | null | null | null |
import random
def game():
# my hit points
my_hp = 1000
# the enemy's hit points
enemy_hp = 1000
while True:
# I take a random hit and lose hit points
my_hp = my_hp - random.randint(0, 50)
# the enemy takes a random hit and loses hit points
enemy_hp = enemy_hp - random.randint(0, 50)
if my_hp <= 0:
# if my hit points are <= 0 at this point, the enemy has won
print("敌人赢了")
# exit the loop
break
elif enemy_hp <= 0:
# if the enemy's hit points are <= 0 at this point, I have won
print("我赢了")
# break out of the loop
break
if __name__ == '__main__':
game()
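A small parameterised variant of the same loop, sketched for illustration: seeding the RNG makes a run reproducible, and the hit-point/damage numbers become arguments (the defaults mirror the values used above):
def game_with(seed=None, start_hp=1000, max_hit=50):
    rng = random.Random(seed)            # seeded generator for reproducible runs
    my_hp, enemy_hp = start_hp, start_hp
    while True:
        my_hp -= rng.randint(0, max_hit)
        enemy_hp -= rng.randint(0, max_hit)
        if my_hp <= 0:
            return 'enemy wins'
        elif enemy_hp <= 0:
            return 'I win'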
| 17.766667
| 51
| 0.463415
| 60
| 533
| 3.85
| 0.533333
| 0.069264
| 0.12987
| 0.138528
| 0.155844
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059603
| 0.433396
| 533
| 29
| 52
| 18.37931
| 0.705298
| 0.159475
| 0
| 0.133333
| 0
| 0
| 0.034169
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.133333
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aa7b0d58d58a3a7d6eb18bd016c9b5d7166087a
| 681
|
py
|
Python
|
petstore/api/api_response.py
|
andrii-grytsenko/io.swagger.petstore3.testing
|
81a0a16d574d0c0664b297e7ba7ff2bb5a9a0c40
|
[
"MIT"
] | null | null | null |
petstore/api/api_response.py
|
andrii-grytsenko/io.swagger.petstore3.testing
|
81a0a16d574d0c0664b297e7ba7ff2bb5a9a0c40
|
[
"MIT"
] | null | null | null |
petstore/api/api_response.py
|
andrii-grytsenko/io.swagger.petstore3.testing
|
81a0a16d574d0c0664b297e7ba7ff2bb5a9a0c40
|
[
"MIT"
] | null | null | null |
from enum import Enum
class ApiResponseType(Enum):
error = "Error"
warning = "Warning"
info = "Info"
ok = "OK"
too_busy = "Too busy"
class ApiResponse:
def __init__(self, code: int, response_type: ApiResponseType, message):
self.code = code
self.type = response_type
self.message = message
class ApiResponseError(Exception):
def __init__(self, response: ApiResponse, message="Api exception"):
self.response = response
self.message = message
super().__init__(self.message)
def __str__(self):
return f"{self.message}\n{self.response.code}: [{self.response.type}] {self.response.message}"
| 25.222222
| 102
| 0.654919
| 78
| 681
| 5.474359
| 0.358974
| 0.140515
| 0.051522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229075
| 681
| 26
| 103
| 26.192308
| 0.813333
| 0
| 0
| 0.105263
| 0
| 0.052632
| 0.180617
| 0.120411
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.052632
| 0.052632
| 0.684211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aa7bee18a6d9f952d21c773ce61328493a8b54b
| 7,503
|
py
|
Python
|
test/integration/component/test_browse_templates2.py
|
ycyun/ablestack-cloud
|
b7bd36a043e2697d05303246373988aa033c9229
|
[
"Apache-2.0"
] | 1,131
|
2015-01-08T18:59:06.000Z
|
2022-03-29T11:31:10.000Z
|
test/integration/component/test_browse_templates2.py
|
ycyun/ablestack-cloud
|
b7bd36a043e2697d05303246373988aa033c9229
|
[
"Apache-2.0"
] | 5,908
|
2015-01-13T15:28:37.000Z
|
2022-03-31T20:31:07.000Z
|
test/integration/component/test_browse_templates2.py
|
ycyun/ablestack-cloud
|
b7bd36a043e2697d05303246373988aa033c9229
|
[
"Apache-2.0"
] | 1,083
|
2015-01-05T01:16:52.000Z
|
2022-03-31T12:14:10.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
import unittest
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.codes import PASS, FAILED, SUCCESS, XEN_SERVER
from marvin.sshClient import SshClient
import requests
requests.packages.urllib3.disable_warnings()
import random
import string
import telnetlib
import os
import urllib.request, urllib.parse, urllib.error
import time
import tempfile
_multiprocess_shared_ = True
class TestBrowseUploadTemplate(cloudstackTestCase):
"""
Tests for the browser-based upload template feature. Once all issues in test_browse_templates.py are fixed, this should be merged back.
"""
@classmethod
def setUpClass(cls):
cls.testClient = super(TestBrowseUploadTemplate, cls).getClsTestClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.apiclient = cls.testClient.getApiClient()
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls._cleanup = []
cls.cleanup = []
hosts = list_hosts(
cls.apiclient,
type="Routing"
)
if hosts is None:
cls.SkipTest(
"There are no hypervisor's available. Check list hosts response")
cls.uploadtemplateformat = "VHD"
cls.templatename = "test"
cls.templatehypervisor = "XenServer"
cls.templateostypeid = 142
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.domain = get_domain(cls.apiclient)
cls.pod = get_pod(cls.apiclient, cls.zone.id)
cls.account = Account.create(
cls.apiclient,
cls.testdata["account"],
domainid=cls.domain.id
)
cls._cleanup = [
cls.account
]
def waitForSystemVMAgent(self, vmname):
timeout = self.testdata["timeout"]
while True:
list_host_response = list_hosts(
self.apiclient,
name=vmname
)
if list_host_response and list_host_response[0].state == 'Up':
break
if timeout == 0:
raise Exception("Timed out waiting for SSVM agent to be Up")
time.sleep(self.testdata["sleep"])
timeout = timeout - 1
def destroy_ssvm(self):
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
zoneid=self.zone.id
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
"Check list response returns a valid list"
)
ssvm_response = list_ssvm_response[0]
old_name = ssvm_response.name
self.debug("Destroying SSVM: %s" % ssvm_response.id)
cmd = destroySystemVm.destroySystemVmCmd()
cmd.id = ssvm_response.id
self.apiclient.destroySystemVm(cmd)
timeout = self.testdata["timeout"]
while True:
list_ssvm_response = list_ssvms(
self.apiclient,
zoneid=self.zone.id,
systemvmtype='secondarystoragevm'
)
if isinstance(list_ssvm_response, list):
if list_ssvm_response[0].state == 'Running':
break
if timeout == 0:
raise Exception("List SSVM call failed!")
time.sleep(self.testdata["sleep"])
timeout = timeout - 1
ssvm_response = list_ssvm_response[0]
# Verify Name, Public IP, Private IP and Link local IP
# for newly created SSVM
self.assertNotEqual(
ssvm_response.name,
old_name,
"Check SSVM new name with name of destroyed SSVM"
)
self.assertEqual(
hasattr(ssvm_response, 'privateip'),
True,
"Check whether SSVM has private IP field"
)
self.assertEqual(
hasattr(ssvm_response, 'linklocalip'),
True,
"Check whether SSVM has link local IP field"
)
self.assertEqual(
hasattr(ssvm_response, 'publicip'),
True,
"Check whether SSVM has public IP field"
)
# Wait for the agent to be up
self.waitForSystemVMAgent(ssvm_response.name)
return
@attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
def test_browser_upload_template_incomplete(self):
"""
Test a browser-based incomplete template upload, followed by an SSVM destroy. The template should go to the UploadAbandoned state and be cleaned up.
"""
try:
self.debug("========================= Test browser based incomplete template upload ========================")
#Only register template, without uploading
cmd = getUploadParamsForTemplate.getUploadParamsForTemplateCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadtemplateformat
cmd.name=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))
cmd.account=self.account.name
cmd.domainid=self.domain.id
cmd.displaytext=cmd.name
cmd.hypervisor=self.templatehypervisor
cmd.ostypeid=self.templateostypeid
template_response=self.apiclient.getUploadParamsForTemplate(cmd)
#Destroy SSVM, and wait for new one to start
self.destroy_ssvm()
wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
#Verify that the template is cleaned up as part of sync-up during new SSVM start
list_template_response=Template.list(
self.apiclient,
id=template_response.id,
templatefilter="all",
zoneid=self.zone.id)
self.assertEqual(list_template_response, None, "Template is not cleaned up, some issue with template sync-up")
except Exception as e:
self.fail("Exception occurred : %s" % e)
return
@classmethod
def tearDownClass(self):
try:
self.apiclient = super(TestBrowseUploadTemplate, self).getClsTestClient().getApiClient()
cleanup_resources(self.apiclient, self._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
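The two helpers above (waitForSystemVMAgent and the SSVM destroy loop) both poll with a sleep/countdown pattern; a generic sketch of that pattern written against plain callables, so it does not depend on the marvin API:
def wait_until(predicate, timeout, interval=1):
    # call predicate() until it returns True or the countdown runs out
    while timeout > 0:
        if predicate():
            return True
        time.sleep(interval)
        timeout -= 1
    raise Exception('Timed out waiting for condition')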
| 35.060748
| 144
| 0.620552
| 805
| 7,503
| 5.701863
| 0.33913
| 0.044444
| 0.027887
| 0.021786
| 0.160131
| 0.124619
| 0.06841
| 0.017865
| 0
| 0
| 0
| 0.003033
| 0.296815
| 7,503
| 213
| 145
| 35.225352
| 0.866945
| 0.174464
| 0
| 0.256579
| 0
| 0
| 0.124898
| 0.00799
| 0
| 0
| 0
| 0
| 0.039474
| 1
| 0.032895
| false
| 0.006579
| 0.118421
| 0
| 0.177632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aa818d9912fa4e7124c341cc827cf2ddf2f640c
| 11,501
|
py
|
Python
|
tests/components/ozw/test_websocket_api.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/ozw/test_websocket_api.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/ozw/test_websocket_api.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Test OpenZWave Websocket API."""
from unittest.mock import patch
from openzwavemqtt.const import (
ATTR_CODE_SLOT,
ATTR_LABEL,
ATTR_OPTIONS,
ATTR_POSITION,
ATTR_VALUE,
ValueType,
)
from openpeerpower.components.ozw.const import ATTR_CONFIG_PARAMETER
from openpeerpower.components.ozw.lock import ATTR_USERCODE
from openpeerpower.components.ozw.websocket_api import (
ATTR_IS_AWAKE,
ATTR_IS_BEAMING,
ATTR_IS_FAILED,
ATTR_IS_FLIRS,
ATTR_IS_ROUTING,
ATTR_IS_SECURITYV1,
ATTR_IS_ZWAVE_PLUS,
ATTR_NEIGHBORS,
ATTR_NODE_BASIC_STRING,
ATTR_NODE_BAUD_RATE,
ATTR_NODE_GENERIC_STRING,
ATTR_NODE_QUERY_STAGE,
ATTR_NODE_SPECIFIC_STRING,
ID,
NODE_ID,
OZW_INSTANCE,
PARAMETER,
SCHEMA,
TYPE,
VALUE,
)
from openpeerpower.components.websocket_api.const import (
ERR_INVALID_FORMAT,
ERR_NOT_FOUND,
ERR_NOT_SUPPORTED,
)
from .common import MQTTMessage, setup_ozw
async def test_websocket_api(opp, generic_data, opp_ws_client):
"""Test the ozw websocket api."""
await setup_ozw(opp, fixture=generic_data)
client = await opp_ws_client(opp)
# Test instance list
await client.send_json({ID: 4, TYPE: "ozw/get_instances"})
msg = await client.receive_json()
assert len(msg["result"]) == 1
result = msg["result"][0]
assert result[OZW_INSTANCE] == 1
assert result["Status"] == "driverAllNodesQueried"
assert result["OpenZWave_Version"] == "1.6.1008"
# Test network status
await client.send_json({ID: 5, TYPE: "ozw/network_status"})
msg = await client.receive_json()
result = msg["result"]
assert result["Status"] == "driverAllNodesQueried"
assert result[OZW_INSTANCE] == 1
# Test node status
await client.send_json({ID: 6, TYPE: "ozw/node_status", NODE_ID: 32})
msg = await client.receive_json()
result = msg["result"]
assert result[OZW_INSTANCE] == 1
assert result[NODE_ID] == 32
assert result[ATTR_NODE_QUERY_STAGE] == "Complete"
assert result[ATTR_IS_ZWAVE_PLUS]
assert result[ATTR_IS_AWAKE]
assert not result[ATTR_IS_FAILED]
assert result[ATTR_NODE_BAUD_RATE] == 100000
assert result[ATTR_IS_BEAMING]
assert not result[ATTR_IS_FLIRS]
assert result[ATTR_IS_ROUTING]
assert not result[ATTR_IS_SECURITYV1]
assert result[ATTR_NODE_BASIC_STRING] == "Routing Slave"
assert result[ATTR_NODE_GENERIC_STRING] == "Binary Switch"
assert result[ATTR_NODE_SPECIFIC_STRING] == "Binary Power Switch"
assert result[ATTR_NEIGHBORS] == [1, 33, 36, 37, 39]
await client.send_json({ID: 7, TYPE: "ozw/node_status", NODE_ID: 999})
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test node statistics
await client.send_json({ID: 8, TYPE: "ozw/node_statistics", NODE_ID: 39})
msg = await client.receive_json()
result = msg["result"]
assert result[OZW_INSTANCE] == 1
assert result[NODE_ID] == 39
assert result["send_count"] == 57
assert result["sent_failed"] == 0
assert result["retries"] == 1
assert result["last_request_rtt"] == 26
assert result["last_response_rtt"] == 38
assert result["average_request_rtt"] == 29
assert result["average_response_rtt"] == 37
assert result["received_packets"] == 3594
assert result["received_dup_packets"] == 12
assert result["received_unsolicited"] == 3546
# Test node metadata
await client.send_json({ID: 9, TYPE: "ozw/node_metadata", NODE_ID: 39})
msg = await client.receive_json()
result = msg["result"]
assert result["metadata"]["ProductPic"] == "images/aeotec/zwa002.png"
await client.send_json({ID: 10, TYPE: "ozw/node_metadata", NODE_ID: 999})
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test network statistics
await client.send_json({ID: 11, TYPE: "ozw/network_statistics"})
msg = await client.receive_json()
result = msg["result"]
assert result["readCnt"] == 92220
assert result[OZW_INSTANCE] == 1
assert result["node_count"] == 5
# Test get nodes
await client.send_json({ID: 12, TYPE: "ozw/get_nodes"})
msg = await client.receive_json()
result = msg["result"]
assert len(result) == 5
assert result[2][ATTR_IS_AWAKE]
assert not result[1][ATTR_IS_FAILED]
# Test get config parameters
await client.send_json({ID: 13, TYPE: "ozw/get_config_parameters", NODE_ID: 39})
msg = await client.receive_json()
result = msg["result"]
assert len(result) == 8
for config_param in result:
assert config_param["type"] in (
ValueType.LIST.value,
ValueType.BOOL.value,
ValueType.INT.value,
ValueType.BYTE.value,
ValueType.SHORT.value,
ValueType.BITSET.value,
)
# Test set config parameter
config_param = result[0]
current_val = config_param[ATTR_VALUE]
new_val = next(
option[0]
for option in config_param[SCHEMA][0][ATTR_OPTIONS]
if option[0] != current_val
)
new_label = next(
option[1]
for option in config_param[SCHEMA][0][ATTR_OPTIONS]
if option[1] != current_val and option[0] != new_val
)
await client.send_json(
{
ID: 14,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: config_param[ATTR_CONFIG_PARAMETER],
VALUE: new_val,
}
)
msg = await client.receive_json()
assert msg["success"]
await client.send_json(
{
ID: 15,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: config_param[ATTR_CONFIG_PARAMETER],
VALUE: new_label,
}
)
msg = await client.receive_json()
assert msg["success"]
# Test OZW Instance not found error
await client.send_json(
{ID: 16, TYPE: "ozw/get_config_parameters", OZW_INSTANCE: 999, NODE_ID: 1}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test OZW Node not found error
await client.send_json(
{
ID: 18,
TYPE: "ozw/set_config_parameter",
NODE_ID: 999,
PARAMETER: 0,
VALUE: "test",
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test parameter not found
await client.send_json(
{
ID: 19,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: 45,
VALUE: "test",
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test list value not found
await client.send_json(
{
ID: 20,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: config_param[ATTR_CONFIG_PARAMETER],
VALUE: "test",
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
# Test value type invalid
await client.send_json(
{
ID: 21,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: 3,
VALUE: 0,
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_SUPPORTED
# Test invalid bitset format
await client.send_json(
{
ID: 22,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: 3,
VALUE: {ATTR_POSITION: 1, ATTR_VALUE: True, ATTR_LABEL: "test"},
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_INVALID_FORMAT
# Test valid bitset format passes validation
await client.send_json(
{
ID: 23,
TYPE: "ozw/set_config_parameter",
NODE_ID: 39,
PARAMETER: 10000,
VALUE: {ATTR_POSITION: 1, ATTR_VALUE: True},
}
)
msg = await client.receive_json()
result = msg["error"]
assert result["code"] == ERR_NOT_FOUND
async def test_ws_locks(opp, lock_data, opp_ws_client):
"""Test lock websocket apis."""
await setup_ozw(opp, fixture=lock_data)
client = await opp_ws_client(opp)
await client.send_json(
{
ID: 1,
TYPE: "ozw/get_code_slots",
NODE_ID: 10,
}
)
msg = await client.receive_json()
assert msg["success"]
await client.send_json(
{
ID: 2,
TYPE: "ozw/set_usercode",
NODE_ID: 10,
ATTR_CODE_SLOT: 1,
ATTR_USERCODE: "1234",
}
)
msg = await client.receive_json()
assert msg["success"]
await client.send_json(
{
ID: 3,
TYPE: "ozw/clear_usercode",
NODE_ID: 10,
ATTR_CODE_SLOT: 1,
}
)
msg = await client.receive_json()
assert msg["success"]
async def test_refresh_node(opp, generic_data, sent_messages, opp_ws_client):
"""Test the ozw refresh node api."""
receive_message = await setup_ozw(opp, fixture=generic_data)
client = await opp_ws_client(opp)
# Send the refresh_node_info command
await client.send_json({ID: 9, TYPE: "ozw/refresh_node_info", NODE_ID: 39})
msg = await client.receive_json()
assert len(sent_messages) == 1
assert msg["success"]
# Receive a mock status update from OZW
message = MQTTMessage(
topic="OpenZWave/1/node/39/",
payload={"NodeID": 39, "NodeQueryStage": "initializing"},
)
message.encode()
receive_message(message)
# Verify we got expected data on the websocket
msg = await client.receive_json()
result = msg["event"]
assert result["type"] == "node_updated"
assert result["node_query_stage"] == "initializing"
# Send another mock status update from OZW
message = MQTTMessage(
topic="OpenZWave/1/node/39/",
payload={"NodeID": 39, "NodeQueryStage": "versions"},
)
message.encode()
receive_message(message)
# Send a mock status update for a different node
message = MQTTMessage(
topic="OpenZWave/1/node/35/",
payload={"NodeID": 35, "NodeQueryStage": "fake_shouldnt_be_received"},
)
message.encode()
receive_message(message)
# Verify we received the message for node 39 but not for node 35
msg = await client.receive_json()
result = msg["event"]
assert result["type"] == "node_updated"
assert result["node_query_stage"] == "versions"
async def test_refresh_node_unsubscribe(opp, generic_data, opp_ws_client):
"""Test unsubscribing the ozw refresh node api."""
await setup_ozw(opp, fixture=generic_data)
client = await opp_ws_client(opp)
with patch("openzwavemqtt.OZWOptions.listen") as mock_listen:
# Send the refresh_node_info command
await client.send_json({ID: 9, TYPE: "ozw/refresh_node_info", NODE_ID: 39})
await client.receive_json()
# Send the unsubscribe command
await client.send_json({ID: 10, TYPE: "unsubscribe_events", "subscription": 9})
await client.receive_json()
assert mock_listen.return_value.called
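The tests above all follow the same request/response convention: every command carries an increasing ID, and the websocket answers with a matching success, result, or error payload. A minimal sketch of that convention against a fake client (FakeClient is hypothetical; the real client comes from the opp_ws_client fixture):
import asyncio

class FakeClient:
    async def send_json(self, payload):
        self._last = payload
    async def receive_json(self):
        # echo the command ID back with a success payload
        return {ID: self._last[ID], 'success': True, 'result': {}}

async def _demo():
    client = FakeClient()
    await client.send_json({ID: 1, TYPE: 'ozw/get_instances'})
    msg = await client.receive_json()
    assert msg['success'] and msg[ID] == 1

asyncio.run(_demo())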
| 29.795337
| 87
| 0.629858
| 1,441
| 11,501
| 4.795975
| 0.147814
| 0.082767
| 0.070323
| 0.08595
| 0.597887
| 0.528144
| 0.472435
| 0.407322
| 0.363623
| 0.351469
| 0
| 0.023658
| 0.261282
| 11,501
| 385
| 88
| 29.872727
| 0.789783
| 0.066081
| 0
| 0.411576
| 0
| 0
| 0.128479
| 0.040523
| 0
| 0
| 0
| 0
| 0.202572
| 1
| 0
| false
| 0
| 0.022508
| 0
| 0.022508
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aaa20d8b1879b4c1bc74cd5f86f6df85b43a7e8
| 30,173
|
py
|
Python
|
tests/test_formatters.py
|
samueljacques-qc/notification-utils
|
77f09cb2633ea5938a28ed50c21c7ae5075da7f2
|
[
"MIT"
] | null | null | null |
tests/test_formatters.py
|
samueljacques-qc/notification-utils
|
77f09cb2633ea5938a28ed50c21c7ae5075da7f2
|
[
"MIT"
] | null | null | null |
tests/test_formatters.py
|
samueljacques-qc/notification-utils
|
77f09cb2633ea5938a28ed50c21c7ae5075da7f2
|
[
"MIT"
] | null | null | null |
import pytest
from flask import Markup
from notifications_utils.formatters import (
unlink_govuk_escaped,
notify_email_markdown,
notify_letter_preview_markdown,
notify_plain_text_email_markdown,
sms_encode,
formatted_list,
strip_dvla_markup,
strip_pipes,
escape_html,
remove_whitespace_before_punctuation,
make_quotes_smart,
replace_hyphens_with_en_dashes,
tweak_dvla_list_markup,
nl2li,
strip_whitespace,
strip_and_remove_obscure_whitespace,
remove_smart_quotes_from_email_addresses,
strip_unsupported_characters,
normalise_whitespace
)
from notifications_utils.template import (
HTMLEmailTemplate,
PlainTextEmailTemplate,
SMSMessageTemplate,
SMSPreviewTemplate
)
@pytest.mark.parametrize(
"url", [
"http://example.com",
"http://www.gov.uk/",
"https://www.gov.uk/",
"http://service.gov.uk",
"http://service.gov.uk/blah.ext?q=a%20b%20c&order=desc#fragment",
pytest.param("http://service.gov.uk/blah.ext?q=one two three", marks=pytest.mark.xfail),
]
)
def test_makes_links_out_of_URLs(url):
link = '<a style="word-wrap: break-word; color: #005ea5;" href="{}">{}</a>'.format(url, url)
assert (notify_email_markdown(url) == (
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'{}'
'</p>'
).format(link))
@pytest.mark.parametrize('input, output', [
(
(
'this is some text with a link http://example.com in the middle'
),
(
'this is some text with a link '
'<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com">http://example.com</a>'
' in the middle'
),
),
(
(
'this link is in brackets (http://example.com)'
),
(
'this link is in brackets '
'(<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com">http://example.com</a>)'
),
)
])
def test_makes_links_out_of_URLs_in_context(input, output):
assert notify_email_markdown(input) == (
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'{}'
'</p>'
).format(output)
@pytest.mark.parametrize(
"url", [
"example.com",
"www.example.com",
"ftp://example.com",
"[email protected]",
"mailto:[email protected]",
"<a href=\"https://example.com\">Example</a>",
]
)
def test_doesnt_make_links_out_of_invalid_urls(url):
assert notify_email_markdown(url) == (
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'{}'
'</p>'
).format(url)
def test_handles_placeholders_in_urls():
assert notify_email_markdown(
"http://example.com/?token=<span class='placeholder'>((token))</span>&key=1"
) == (
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com/?token=">'
'http://example.com/?token='
'</a>'
'<span class=\'placeholder\'>((token))</span>&key=1'
'</p>'
)
@pytest.mark.parametrize(
"url, expected_html, expected_html_in_template", [
(
"""https://example.com"onclick="alert('hi')""",
"""<a style="word-wrap: break-word; color: #005ea5;" href="https://example.com%22onclick=%22alert%28%27hi">https://example.com"onclick="alert('hi</a>')""", # noqa
"""<a style="word-wrap: break-word; color: #005ea5;" href="https://example.com%22onclick=%22alert%28%27hi">https://example.com"onclick="alert('hi</a>‘)""", # noqa
),
(
"""https://example.com"style='text-decoration:blink'""",
"""<a style="word-wrap: break-word; color: #005ea5;" href="https://example.com%22style=%27text-decoration:blink">https://example.com"style='text-decoration:blink</a>'""", # noqa
"""<a style="word-wrap: break-word; color: #005ea5;" href="https://example.com%22style=%27text-decoration:blink">https://example.com"style='text-decoration:blink</a>’""", # noqa
),
]
)
def test_URLs_get_escaped(url, expected_html, expected_html_in_template):
assert notify_email_markdown(url) == (
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'{}'
'</p>'
).format(expected_html)
assert expected_html_in_template in str(HTMLEmailTemplate({'content': url, 'subject': ''}))
def test_HTML_template_has_URLs_replaced_with_links():
assert (
'<a style="word-wrap: break-word; color: #005ea5;" href="https://service.example.com/accept_invite/a1b2c3d4">'
'https://service.example.com/accept_invite/a1b2c3d4'
'</a>'
) in str(HTMLEmailTemplate({'content': (
'You’ve been invited to a service. Click this link:\n'
'https://service.example.com/accept_invite/a1b2c3d4\n'
'\n'
'Thanks\n'
), 'subject': ''}))
@pytest.mark.parametrize('markdown_function, expected_output', [
(notify_email_markdown, (
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'<a style="word-wrap: break-word; color: #005ea5;" href="https://example.com">'
'https://example.com'
'</a>'
'</p>'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'Next paragraph'
'</p>'
)),
(notify_plain_text_email_markdown, (
'\n'
'\nhttps://example.com'
'\n'
'\nNext paragraph'
)),
])
def test_preserves_whitespace_when_making_links(
markdown_function, expected_output
):
assert markdown_function(
'https://example.com\n'
'\n'
'Next paragraph'
) == expected_output
@pytest.mark.parametrize(
"template_content,expected", [
("gov.uk", u"gov.\u200Buk"),
("GOV.UK", u"GOV.\u200BUK"),
("Gov.uk", u"Gov.\u200Buk"),
("https://gov.uk", "https://gov.uk"),
("https://www.gov.uk", "https://www.gov.uk"),
("www.gov.uk", "www.gov.uk"),
("gov.uk/register-to-vote", "gov.uk/register-to-vote"),
("gov.uk?q=", "gov.uk?q=")
]
)
def test_escaping_govuk_in_email_templates(template_content, expected):
assert unlink_govuk_escaped(template_content) == expected
assert expected in str(PlainTextEmailTemplate({'content': template_content, 'subject': ''}))
assert expected in str(HTMLEmailTemplate({'content': template_content, 'subject': ''}))
@pytest.mark.parametrize(
"subject,expected", [
("bonjour | hi", "bonjour | hi"),
("bonjour .", "bonjour."),
('double -- dash', 'double \u2013 dash'),
]
)
def test_subject_is_cleaned_up(subject, expected):
assert expected == HTMLEmailTemplate({'content': '', 'subject': subject}).subject
@pytest.mark.parametrize(
"prefix, body, expected", [
("a", "b", "a: b"),
(None, "b", "b"),
]
)
def test_sms_message_adds_prefix(prefix, body, expected):
template = SMSMessageTemplate({'content': body})
template.prefix = prefix
template.sender = None
assert str(template) == expected
def test_sms_preview_adds_newlines():
template = SMSPreviewTemplate({'content': """
the
quick
brown fox
"""})
template.prefix = None
template.sender = None
assert '<br>' in str(template)
@pytest.mark.parametrize(
'markdown_function, expected',
(
[
notify_letter_preview_markdown,
'print("hello")'
],
[
notify_email_markdown,
'print("hello")'
],
[
notify_plain_text_email_markdown,
'print("hello")'
],
)
)
def test_block_code(markdown_function, expected):
assert markdown_function('```\nprint("hello")\n```') == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<p>inset text</p>'
)
],
[
notify_email_markdown,
(
'<blockquote '
'style="Margin: 0 0 20px 0; border-left: 10px solid #BFC1C3;'
'padding: 15px 0 0.1px 15px; font-size: 19px; line-height: 25px;'
'">'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">inset text</p>'
'</blockquote>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\ninset text'
),
],
))
def test_block_quote(markdown_function, expected):
assert markdown_function('^ inset text') == expected
@pytest.mark.parametrize('heading', (
'# heading',
'#heading',
))
@pytest.mark.parametrize(
'markdown_function, expected',
(
[
notify_letter_preview_markdown,
'<h2>heading</h2>\n'
],
[
notify_email_markdown,
(
'<h2 style="Margin: 0 0 20px 0; padding: 0; font-size: 27px; '
'line-height: 35px; font-weight: bold; color: #0B0C0C;">'
'heading'
'</h2>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\n'
'\nheading'
'\n-----------------------------------------------------------------'
),
],
)
)
def test_level_1_header(markdown_function, heading, expected):
assert markdown_function(heading) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
'<p>inset text</p>'
],
[
notify_email_markdown,
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">inset text</p>'
],
[
notify_plain_text_email_markdown,
(
'\n'
'\ninset text'
),
],
))
def test_level_2_header(markdown_function, expected):
assert markdown_function('## inset text') == (expected)
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<p>a</p>'
'<div class="page-break"> </div>'
'<p>b</p>'
)
],
[
notify_email_markdown,
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">a</p>'
'<hr style="border: 0; height: 1px; background: #BFC1C3; Margin: 30px 0 30px 0;">'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">b</p>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\na'
'\n'
'\n================================================================='
'\n'
'\nb'
),
],
))
def test_hrule(markdown_function, expected):
assert markdown_function('a\n\n***\n\nb') == expected
assert markdown_function('a\n\n---\n\nb') == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<ol>\n'
'<li>one</li>\n'
'<li>two</li>\n'
'<li>three</li>\n'
'</ol>\n'
)
],
[
notify_email_markdown,
(
'<table role="presentation" style="padding: 0 0 20px 0;">'
'<tr>'
'<td style="font-family: Helvetica, Arial, sans-serif;">'
'<ol style="Margin: 0 0 0 20px; padding: 0; list-style-type: decimal;">'
'<li style="Margin: 5px 0 5px; padding: 0 0 0 5px; font-size: 19px;'
'line-height: 25px; color: #0B0C0C;">one</li>'
'<li style="Margin: 5px 0 5px; padding: 0 0 0 5px; font-size: 19px;'
'line-height: 25px; color: #0B0C0C;">two</li>'
'<li style="Margin: 5px 0 5px; padding: 0 0 0 5px; font-size: 19px;'
'line-height: 25px; color: #0B0C0C;">three</li>'
'</ol>'
'</td>'
'</tr>'
'</table>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\n1. one'
'\n2. two'
'\n3. three'
),
],
))
def test_ordered_list(markdown_function, expected):
assert markdown_function(
'1. one\n'
'2. two\n'
'3. three\n'
) == expected
assert markdown_function(
'1.one\n'
'2.two\n'
'3.three\n'
) == expected
@pytest.mark.parametrize('markdown', (
( # no space
'*one\n'
'*two\n'
'*three\n'
),
( # single space
'* one\n'
'* two\n'
'* three\n'
),
( # two spaces
'* one\n'
'* two\n'
'* three\n'
),
( # tab
'* one\n'
'* two\n'
'* three\n'
),
( # dash as bullet
'- one\n'
'- two\n'
'- three\n'
),
pytest.param(( # plus as bullet
'+ one\n'
'+ two\n'
'+ three\n'
), marks=pytest.mark.xfail(raises=AssertionError)),
( # bullet as bullet
'• one\n'
'• two\n'
'• three\n'
),
))
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<ul>\n'
'<li>one</li>\n'
'<li>two</li>\n'
'<li>three</li>\n'
'</ul>\n'
)
],
[
notify_email_markdown,
(
'<table role="presentation" style="padding: 0 0 20px 0;">'
'<tr>'
'<td style="font-family: Helvetica, Arial, sans-serif;">'
'<ul style="Margin: 0 0 0 20px; padding: 0; list-style-type: disc;">'
'<li style="Margin: 5px 0 5px; padding: 0 0 0 5px; font-size: 19px;'
'line-height: 25px; color: #0B0C0C;">one</li>'
'<li style="Margin: 5px 0 5px; padding: 0 0 0 5px; font-size: 19px;'
'line-height: 25px; color: #0B0C0C;">two</li>'
'<li style="Margin: 5px 0 5px; padding: 0 0 0 5px; font-size: 19px;'
'line-height: 25px; color: #0B0C0C;">three</li>'
'</ul>'
'</td>'
'</tr>'
'</table>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\n• one'
'\n• two'
'\n• three'
),
],
))
def test_unordered_list(markdown, markdown_function, expected):
assert markdown_function(markdown) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
'<p>+ one</p><p>+ two</p><p>+ three</p>',
],
[
notify_email_markdown,
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">+ one</p>'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">+ two</p>'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">+ three</p>'
),
],
[
notify_plain_text_email_markdown,
(
'\n\n+ one'
'\n\n+ two'
'\n\n+ three'
),
],
))
def test_pluses_dont_render_as_lists(markdown_function, expected):
assert markdown_function(
'+ one\n'
'+ two\n'
'+ three\n'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<p>'
'line one<br>'
'line two'
'</p>'
'<p>'
'new paragraph'
'</p>'
)
],
[
notify_email_markdown,
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">line one<br />'
'line two</p>'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">new paragraph</p>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\nline one'
'\nline two'
'\n'
'\nnew paragraph'
),
],
))
def test_paragraphs(markdown_function, expected):
assert markdown_function(
'line one\n'
'line two\n'
'\n'
'new paragraph'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<p>before</p>'
'<p>after</p>'
)
],
[
notify_email_markdown,
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">before</p>'
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">after</p>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\nbefore'
'\n'
'\nafter'
),
],
))
def test_multiple_newlines_get_truncated(markdown_function, expected):
assert markdown_function(
'before\n\n\n\n\n\nafter'
) == expected
@pytest.mark.parametrize('markdown_function', (
notify_letter_preview_markdown, notify_email_markdown, notify_plain_text_email_markdown
))
def test_table(markdown_function):
assert markdown_function(
'col | col\n'
'----|----\n'
'val | val\n'
) == (
''
)
@pytest.mark.parametrize('markdown_function, link, expected', (
[
notify_letter_preview_markdown,
'http://example.com',
'<p><strong>example.com</strong></p>'
],
[
notify_email_markdown,
'http://example.com',
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com">http://example.com</a>'
'</p>'
)
],
[
notify_email_markdown,
"""https://example.com"onclick="alert('hi')""",
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'<a style="word-wrap: break-word; color: #005ea5;" href="https://example.com%22onclick=%22alert%28%27hi">'
'https://example.com"onclick="alert(\'hi'
'</a>\')'
'</p>'
)
],
[
notify_plain_text_email_markdown,
'http://example.com',
(
'\n'
'\nhttp://example.com'
),
],
))
def test_autolink(markdown_function, link, expected):
assert markdown_function(link) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
'<p>variable called thing</p>'
],
[
notify_email_markdown,
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">variable called thing</p>'
],
[
notify_plain_text_email_markdown,
'\n\nvariable called thing',
],
))
def test_codespan(markdown_function, expected):
assert markdown_function(
'variable called `thing`'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
'<p>something important</p>'
],
[
notify_email_markdown,
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">something **important**</p>'
],
[
notify_plain_text_email_markdown,
'\n\nsomething **important**',
],
))
def test_double_emphasis(markdown_function, expected):
assert markdown_function(
'something **important**'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
'<p>something important</p>'
],
[
notify_email_markdown,
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">something *important*</p>'
],
[
notify_plain_text_email_markdown,
'\n\nsomething *important*',
],
))
def test_emphasis(markdown_function, expected):
assert markdown_function(
'something *important*'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_email_markdown,
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">foo ****** bar</p>'
],
[
notify_plain_text_email_markdown,
'\n\nfoo ****** bar',
],
))
def test_nested_emphasis(markdown_function, expected):
assert markdown_function(
'foo ****** bar'
) == expected
@pytest.mark.parametrize('markdown_function', (
notify_letter_preview_markdown, notify_email_markdown, notify_plain_text_email_markdown
))
def test_image(markdown_function):
assert markdown_function(
''
) == (
''
)
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<p>Example: <strong>example.com</strong></p>'
)
],
[
notify_email_markdown,
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; '
'color: #0B0C0C;">'
'<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com">Example</a>'
'</p>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\nExample: http://example.com'
),
],
))
def test_link(markdown_function, expected):
assert markdown_function(
'[Example](http://example.com)'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
(
'<p>Example: <strong>example.com</strong></p>'
)
],
[
notify_email_markdown,
(
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; '
'color: #0B0C0C;">'
'<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com" title="An example URL">'
'Example'
'</a>'
'</p>'
)
],
[
notify_plain_text_email_markdown,
(
'\n'
'\nExample (An example URL): http://example.com'
),
],
))
def test_link_with_title(markdown_function, expected):
assert markdown_function(
'[Example](http://example.com "An example URL")'
) == expected
@pytest.mark.parametrize('markdown_function, expected', (
[
notify_letter_preview_markdown,
'<p>Strike</p>'
],
[
notify_email_markdown,
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">Strike</p>'
],
[
notify_plain_text_email_markdown,
'\n\nStrike'
],
))
def test_strikethrough(markdown_function, expected):
assert markdown_function('~~Strike~~') == expected
def test_footnotes():
# Can’t work out how to test this
pass
def test_sms_encode():
assert sms_encode('aàá…') == 'aàa...'
@pytest.mark.parametrize('items, kwargs, expected_output', [
([1], {}, '‘1’'),
([1, 2], {}, '‘1’ and ‘2’'),
([1, 2, 3], {}, '‘1’, ‘2’ and ‘3’'),
([1, 2, 3], {'prefix': 'foo', 'prefix_plural': 'bar'}, 'bar ‘1’, ‘2’ and ‘3’'),
([1], {'prefix': 'foo', 'prefix_plural': 'bar'}, 'foo ‘1’'),
([1, 2, 3], {'before_each': 'a', 'after_each': 'b'}, 'a1b, a2b and a3b'),
([1, 2, 3], {'conjunction': 'foo'}, '‘1’, ‘2’ foo ‘3’'),
(['&'], {'before_each': '<i>', 'after_each': '</i>'}, '<i>&</i>'),
([1, 2, 3], {'before_each': '<i>', 'after_each': '</i>'}, '<i>1</i>, <i>2</i> and <i>3</i>'),
])
def test_formatted_list(items, kwargs, expected_output):
assert formatted_list(items, **kwargs) == expected_output
def test_formatted_list_returns_markup():
assert isinstance(formatted_list([0]), Markup)
def test_removing_dvla_markup():
assert strip_dvla_markup(
(
'some words & some more <words>'
'<cr><h1><h2><p><normal><op><np><bul><tab>'
'<CR><H1><H2><P><NORMAL><OP><NP><BUL><TAB>'
'<tAb>'
)
) == 'some words & some more <words>'
def test_removing_pipes():
assert strip_pipes('|a|b|c') == 'abc'
def test_bleach_doesnt_try_to_make_valid_html_before_cleaning():
assert escape_html(
"<to cancel daily cat facts reply 'cancel'>"
) == (
"<to cancel daily cat facts reply 'cancel'>"
)
@pytest.mark.parametrize('dirty, clean', [
(
'Hello ((name)) ,\n\nThis is a message',
'Hello ((name)),\n\nThis is a message'
),
(
'Hello Jo ,\n\nThis is a message',
'Hello Jo,\n\nThis is a message'
),
(
'\n \t , word',
'\n, word',
),
(
'bonjour | hi',
'bonjour | hi',
),
])
def test_removing_whitespace_before_punctuation(dirty, clean):
assert remove_whitespace_before_punctuation(dirty) == clean
@pytest.mark.parametrize('dirty, clean', [
(
'Hello ((name)) .\n\nThis is a message',
'Hello ((name)).\n\nThis is a message'
),
(
'Hello Jo .\n\nThis is a message',
'Hello Jo.\n\nThis is a message'
),
(
'\n \t . word',
'\n. word',
),
])
def test_removing_whitespace_before_full_stops(dirty, clean):
assert remove_whitespace_before_punctuation(dirty) == clean
@pytest.mark.parametrize('dumb, smart', [
(
"""And I said, "what about breakfast at Tiffany's"?""",
"""And I said, “what about breakfast at Tiffany’s”?""",
),
(
"""
<a href="http://example.com?q='foo'">http://example.com?q='foo'</a>
""",
"""
<a href="http://example.com?q='foo'">http://example.com?q='foo'</a>
""",
),
])
def test_smart_quotes(dumb, smart):
assert make_quotes_smart(dumb) == smart
@pytest.mark.parametrize('nasty, nice', [
(
(
'The en dash - always with spaces in running text when, as '
'discussed in this section, indicating a parenthesis or '
'pause - and the spaced em dash both have a certain '
'technical advantage over the unspaced em dash. '
),
(
'The en dash \u2013 always with spaces in running text when, as '
'discussed in this section, indicating a parenthesis or '
'pause \u2013 and the spaced em dash both have a certain '
'technical advantage over the unspaced em dash. '
),
),
(
'double -- dash',
'double \u2013 dash',
),
(
'triple --- dash',
'triple \u2013 dash',
),
(
'quadruple ---- dash',
'quadruple ---- dash',
),
(
'em — dash',
'em – dash',
),
(
'already\u0020–\u0020correct', # \u0020 is a normal space character
'already\u0020–\u0020correct',
),
(
'2004-2008',
'2004-2008', # no replacement
),
(
'bonjour | hi',
'bonjour | hi',
),
])
def test_en_dashes(nasty, nice):
assert replace_hyphens_with_en_dashes(nasty) == nice
def test_unicode_dash_lookup():
en_dash_replacement_sequence = '\u0020\u2013'
hyphen = '-'
en_dash = '–'
space = ' '
non_breaking_space = ' '
assert en_dash_replacement_sequence == space + en_dash
assert non_breaking_space not in en_dash_replacement_sequence
assert hyphen not in en_dash_replacement_sequence
@pytest.mark.parametrize('markup, expected_fixed', [
(
'a',
'a',
),
(
'before<p><cr><p><cr>after',
'before<p><cr>after',
),
(
'before<cr><cr><np>after',
'before<cr><np>after',
),
(
'before{}<np>after'.format('<cr>' * 4),
'before{}<np>after'.format('<cr>' * 3),
),
])
def test_tweaking_dvla_list_markup(markup, expected_fixed):
assert tweak_dvla_list_markup(markup) == expected_fixed
def test_make_list_from_linebreaks():
assert nl2li(
'a\n'
'b\n'
'c\n'
) == (
'<ul>'
'<li>a</li>'
'<li>b</li>'
'<li>c</li>'
'</ul>'
)
@pytest.mark.parametrize('value', [
'bar',
' bar ',
"""
\t bar
""",
' \u180E\u200B \u200C bar \u200D \u2060\uFEFF ',
])
def test_strip_whitespace(value):
assert strip_whitespace(value) == 'bar'
@pytest.mark.parametrize('value', [
'notifications-email',
' \tnotifications-email \x0c ',
'\rn\u200Coti\u200Dfi\u200Bcati\u2060ons-\u180Eemai\uFEFFl\uFEFF',
])
def test_strip_and_remove_obscure_whitespace(value):
assert strip_and_remove_obscure_whitespace(value) == 'notifications-email'
def test_strip_and_remove_obscure_whitespace_only_removes_normal_whitespace_from_ends():
sentence = ' words \n over multiple lines with \ttabs\t '
assert strip_and_remove_obscure_whitespace(sentence) == 'words \n over multiple lines with \ttabs'
def test_remove_smart_quotes_from_email_addresses():
assert remove_smart_quotes_from_email_addresses("""
line one’s quote
first.o’[email protected] is someone’s email address
line ‘three’
""") == ("""
line one’s quote
first.o'[email protected] is someone’s email address
line ‘three’
""")
def test_strip_unsupported_characters():
assert strip_unsupported_characters("line one\u2028line two") == ("line oneline two")
def test_normalise_whitespace():
assert normalise_whitespace('\u200C Your tax is\ndue\n\n') == 'Your tax is due'
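One pattern worth calling out from the tests above: stacking pytest.mark.parametrize (as test_unordered_list does) runs the test once per combination of the two lists, i.e. a cross-product. A tiny self-contained illustration:
@pytest.mark.parametrize('suffix', ['!', '?'])
@pytest.mark.parametrize('word', ['hi', 'bye'])
def test_parametrize_cross_product(word, suffix):
    # 2 x 2 = 4 test cases are generated
    assert (word + suffix).startswith(word)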
| 28.094041
| 190
| 0.545388
| 3,399
| 30,173
| 4.677846
| 0.12739
| 0.065409
| 0.050189
| 0.039434
| 0.680189
| 0.637107
| 0.562327
| 0.500126
| 0.477421
| 0.46522
| 0
| 0.035045
| 0.286945
| 30,173
| 1,073
| 191
| 28.120224
| 0.703323
| 0.006131
| 0
| 0.440748
| 0
| 0.051975
| 0.386338
| 0.028247
| 0
| 0
| 0
| 0
| 0.060291
| 1
| 0.053015
| false
| 0.00104
| 0.012474
| 0
| 0.065489
| 0.004158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aac44d185f9607658c52f2deb96d4cdd7259f28
| 5,384
|
py
|
Python
|
codigos_videos/Exemplo_2.py
|
Miguel-mmf/Biblioteca_Dash_em-Python
|
63d268f568c02bc9b6c73e1f52ade2475ffbb3c5
|
[
"MIT"
] | 1
|
2022-03-17T13:55:33.000Z
|
2022-03-17T13:55:33.000Z
|
codigos_videos/Exemplo_2.py
|
Miguel-mmf/Biblioteca_Dash_em-Python
|
63d268f568c02bc9b6c73e1f52ade2475ffbb3c5
|
[
"MIT"
] | null | null | null |
codigos_videos/Exemplo_2.py
|
Miguel-mmf/Biblioteca_Dash_em-Python
|
63d268f568c02bc9b6c73e1f52ade2475ffbb3c5
|
[
"MIT"
] | 1
|
2020-12-12T21:56:06.000Z
|
2020-12-12T21:56:06.000Z
|
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# This file contains a few changes compared to the file shown in the YouTube video
# Be sure to watch the video and to study the official Dash documentation
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# importing the required libraries
import dash
import dash_core_components as dcc
import dash_html_components as html
# importing the functions that support callbacks, from the dependencies subpackage of the dash package
from dash.dependencies import Input, Output
# importing the graph_objects module from the plotly library
import plotly.graph_objects as go
# adding an external stylesheet through the link below
# this link is the one recommended by the Dash documentation and, if you open it in your browser,
# you will notice that it has the structure of a CSS file
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# creating the application with the Dash function from the dash package and assigning it to the app variable
app = dash.Dash(
__name__,
external_stylesheets=external_stylesheets
)
# creating a function that generates a chart with the plotly.graph_objects library
def gera_grafico(tipo):
# creating a figure
# if you print(fig), a dictionary is shown, since figures can be represented that way; plotly modules are needed to work with that information
fig = go.Figure()
# https://plotly.com/python/creating-and-updating-figures/
# adding a trace to the figure
fig.add_trace(
go.Scatter(
x=[0,1,2,3,4,5,6],
y=[0,1,2,3,4,5,6],
mode=tipo,
name='Reta',
)
)
fig.add_trace(
go.Scatter(
x=[0,1,2,3,4,5,6],
y=[0,1,4,9,16,25,36],
mode=tipo,
name='Parábola',
)
)
# adding a title to the chart
fig.update_layout(title='Gráfico Exemplo')
# value returned by the gera_grafico(tipo) function
return fig
# creating a layout for the app variable
# adding to the layout an html.Div component that will hold the other components that give it shape
app.layout = html.Div([
# inserting a component from the dash HTML components library as the layout title/header
html.H2(
['Painel de Visualização de Gráficos'],
# the style parameter sets CSS styles for the component
style={
'textAlign':'center', # centered text
'font-weight':'bold' # bold text
}
),
# adding a horizontal rule to the layout
html.Hr(),
# creating the parent tabs inside the layout
dcc.Tabs(
# id/name of the component
id='tabs',
# creating the child tabs inside the children parameter of the Tabs() function
children=[
dcc.Tab(label='Gráfico de linha',value='tab-1'),
dcc.Tab(label='Gráfico de Barra',value='tab-2'),
dcc.Tab(label='Gráfico de Linha e Pontos',value='tab-3')
]
),
# where the tab content is rendered right after the callback fires
html.Div(id='tabs-content'),
html.Hr(),
])
# Callback
# structuring the callback with the inputs (Input) and outputs (Output)
@app.callback(
# Output(component_id,component_property)
Output('tabs-content','children'),
[
# Input(component_id,component_property)
Input('tabs','value')
]
)
# function that will be called by the callback
def update_tab(tab):
# when the tab whose value is 'tab-1' is selected, the children property of the 'tabs-content' component
# receives the line chart returned below by gera_grafico(tipo='lines')
if tab == 'tab-1':
return html.Div([
dcc.Graph(figure = gera_grafico('lines'))
])
# when the tab whose value is 'tab-2' is selected, the children property of the 'tabs-content' component
# receives the bar chart built and returned below
elif tab == 'tab-2':
fig_bar = go.Figure()
fig_bar.add_trace(
go.Bar(
x=[0,1,2,3,4,5,6],
y=[0,1,2,3,4,5,6],
)
)
fig_bar.add_trace(
go.Bar(
x=[0,1,2,3,4,5,6],
y=[0,1,4,9,16,25,36],
)
)
fig_bar.update_layout(title='Gráfico em Barras Exemplo')
return html.Div([
dcc.Graph(figure = fig_bar)
])
# when the tab whose value is 'tab-3' is selected, the children property of the 'tabs-content' component
# receives the line-and-markers chart returned below by gera_grafico(tipo='lines+markers')
elif tab == 'tab-3':
return html.Div([
dcc.Graph(figure = gera_grafico('lines+markers'))
])
# if none of the conditions above match, something went wrong, so we return the error message
else:
return html.Div(['Erro!'])
# serving the dash application in development/test mode
if __name__ == "__main__":
app.run_server(debug=True)
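# A minimal usage sketch (an assumption for illustration, not part of the original script):
# outside the Dash server, e.g. in a REPL with the imports above available, gera_grafico()
# returns an ordinary plotly Figure and the callback body can be exercised directly:
#
#     fig = gera_grafico('lines+markers')
#     fig.show()                 # renders the example chart in the default plotly renderer
#     div = update_tab('tab-2')  # returns the html.Div holding the bar chart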
| 32.630303
| 202
| 0.610513
| 702
| 5,384
| 4.618234
| 0.34188
| 0.004935
| 0.005552
| 0.007403
| 0.227329
| 0.22116
| 0.197409
| 0.197409
| 0.172424
| 0.145898
| 0
| 0.018118
| 0.261887
| 5,384
| 165
| 203
| 32.630303
| 0.797685
| 0.535847
| 0
| 0.333333
| 0
| 0
| 0.130647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022989
| false
| 0
| 0.057471
| 0
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aacd880ac180e15a9b5e161088e8e7de26eb77d
| 26,616
|
py
|
Python
|
Lab 2/javaccflab/formatter.py
|
tochanenko/MetaProgramming
|
d37f21432483e39e135fd0dc4f8767836eea1609
|
[
"MIT"
] | null | null | null |
Lab 2/javaccflab/formatter.py
|
tochanenko/MetaProgramming
|
d37f21432483e39e135fd0dc4f8767836eea1609
|
[
"MIT"
] | null | null | null |
Lab 2/javaccflab/formatter.py
|
tochanenko/MetaProgramming
|
d37f21432483e39e135fd0dc4f8767836eea1609
|
[
"MIT"
] | null | null | null |
import re
import datetime
from javaccflab.lexer import parse
from javaccflab.java_token import TokenType, Token, update_token_value
class Formatter:
def __init__(self, files):
self.__files = files
self.__file = None
self.__tokens = []
self.__to_fix = dict()
def process(self):
tokens = []
for file in self.__files:
tokens.append(parse(open(file, 'r').read()))
i = 0
while i < len(tokens):
self.__tokens = tokens[i]
self.__file = self.__files[i]
self.__find_to_fix()
tokens[i] = self.__tokens
i += 1
i = 0
while i < len(tokens):
self.__tokens = tokens[i]
self.__file = self.__files[i]
self.__fix()
self.__fix_comments()
tokens[i] = self.__tokens
i += 1
return tokens
def __find_to_fix(self):
i = 0
while i < len(self.__tokens):
token = self.__tokens[i]
if token.get_value() == 'package':
i = self.__fix_package(i)
elif token.get_value() in ('class', 'interface') and self.__tokens[i - 1].get_value() != '.':
i = self.__skip_ws_tokens(i + 1)
if not Formatter.is_camel_upper_case(self.__tokens[i].get_value()):
self.__to_fix[self.__tokens[i].get_value()] = Formatter.to_camel_upper_case(
self.__tokens[i].get_value())
i = self.__fix_class_body(i, self.__tokens[i].get_value())
i += 1
def __fix_package(self, pos):
pos = self.__skip_ws_tokens(pos)
while self.__tokens[pos].get_value() != ';':
if self.__tokens[pos].get_type() == TokenType.IDENTIFIER and not Formatter.is_lower_case(
self.__tokens[pos].get_value()):
self.__to_fix[self.__tokens[pos].get_value()] = Formatter.to_lower_case(
(self.__tokens[pos].get_value()))
pos += 1
return pos
def __fix_class_body(self, pos, class_name):
while self.__tokens[pos].get_value() != '{':
pos += 1
count = 1
pos += 1
while count != 0:
if self.__tokens[pos].get_value() == '{':
count += 1
elif self.__tokens[pos].get_value() == '}':
count -= 1
elif self.__tokens[pos].get_value() == 'static':
i = self.__skip_ws_tokens(pos + 1)
if self.__tokens[i].get_value() == '{':
pos = i + 1
count += 1
continue
elif self.__tokens[pos].get_type() in (TokenType.IDENTIFIER, TokenType.KEYWORD):
if self.__is_parameter(pos):
parameters, i = self.__get_field_names(pos)
if self.__is_final(pos):
for parameter in parameters:
if not Formatter.is_snake_upper_case(parameter):
self.__to_fix[parameter] = Formatter.to_snake_upper_case(parameter)
else:
for parameter in parameters:
if not Formatter.is_camel_lower_case(parameter):
self.__to_fix[parameter] = Formatter.to_camel_lower_case(parameter)
pos = i
else:
self.__fix_method_name(pos, class_name)
parameters = self.__get_method_parameters(pos)
pos = self.__fix_method_body(pos, parameters)
pos += 1
return pos
def __fix_method_name(self, i, class_name):
while self.__tokens[i].get_value() not in ('(', ';'):
i += 1
i -= 1
while self.__tokens[i].get_type() == TokenType.WHITESPACE:
i -= 1
if self.__tokens[i].get_value() != class_name and not Formatter.is_snake_lower_case(
self.__tokens[i].get_value()):
self.__to_fix[self.__tokens[i].get_value()] = Formatter.to_snake_lower_case(self.__tokens[i].get_value())
def __get_method_parameters(self, i):
parameters = dict()
while self.__tokens[i].get_value() != '(':
i += 1
while self.__tokens[i].get_value() != ')':
if self.__tokens[i + 1].get_value() in (')', ','):
pos = i
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
if not Formatter.is_camel_lower_case(self.__tokens[pos].get_value()):
fixed_value = Formatter.to_camel_lower_case(self.__tokens[pos].get_value())
parameters[self.__tokens[pos].get_value()] = fixed_value
update_token_value(self.__file, self.__tokens[pos], fixed_value)
i += 1
return parameters
def __fix_method_body(self, i, method_parameters):
params = dict()
while self.__tokens[i].get_value() not in ('{', ';'):
if self.__tokens[i].get_value() in method_parameters.keys():
update_token_value(self.__file, self.__tokens[i], method_parameters[self.__tokens[i].get_value()])
i += 1
if self.__tokens[i].get_value() == ';':
return i + 1
brace_count = 1
i += 1
while brace_count != 0:
if self.__tokens[i].get_value() == '{':
brace_count += 1
elif self.__tokens[i].get_value() == '}':
brace_count -= 1
elif self.__tokens[i].get_value() in ('=', ';'):
naming_pos = i - 1
while self.__tokens[naming_pos].get_type() == TokenType.WHITESPACE:
naming_pos -= 1
if self.__tokens[naming_pos].get_type() == TokenType.IDENTIFIER:
type_pos = naming_pos - 1
while self.__tokens[type_pos].get_type() == TokenType.WHITESPACE:
type_pos -= 1
if (self.__tokens[type_pos].get_type() in (TokenType.IDENTIFIER, TokenType.KEYWORD) and \
self.__tokens[type_pos].get_value() not in ('class', 'identifier')) or self.__tokens[
type_pos].get_value() == ',':
if not Formatter.is_camel_lower_case(self.__tokens[naming_pos].get_value()):
fixed_value = Formatter.to_camel_lower_case(self.__tokens[naming_pos].get_value())
params[self.__tokens[naming_pos].get_value()] = fixed_value
update_token_value(self.__file, self.__tokens[naming_pos], fixed_value)
elif self.__tokens[i].get_type() == TokenType.IDENTIFIER and self.__tokens[
i].get_value() in params.keys():
update_token_value(self.__file, self.__tokens[i], params[self.__tokens[i].get_value()])
elif self.__tokens[i].get_type() == TokenType.IDENTIFIER and self.__tokens[
i].get_value() in method_parameters.keys():
update_token_value(self.__file, self.__tokens[i], method_parameters[self.__tokens[i].get_value()])
i += 1
return i
def __get_field_names(self, i):
params = []
while self.__tokens[i].get_value() != ';':
if self.__tokens[i + 1].get_value() in (';', '=', ','):
pos = i
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
field_name = self.__tokens[pos].get_value()
is_value = False
if self.__tokens[i + 1].get_value() in (';', ','):
while pos > 0 and self.__tokens[pos].get_value() not in (';', '}'):
if self.__tokens[pos].get_value() == '=':
is_value = True
pos -= 1
if not is_value:
params.append(field_name)
i += 1
end = i
return params, end
def __is_final(self, i):
while self.__tokens[i].get_value() not in (';', '=', '('):
if self.__tokens[i].get_value() == 'final':
return True
i += 1
return False
def __is_parameter(self, pos):
while self.__tokens[pos].get_value() != ';' and pos < len(self.__tokens):
if self.__tokens[pos].get_value() == '=':
return True
elif self.__tokens[pos].get_value() in ('class', 'interface', '(', ')'):
return False
pos += 1
return True
def __fix(self):
for token in self.__tokens:
if token.get_value() in self.__to_fix and not token.is_fixed():
update_token_value(self.__file, token, self.__to_fix[token.get_value()])
def __fix_comments(self):
self.__add_start_comment()
i = 0
while i < len(self.__tokens):
if self.__tokens[i].get_value() in ('class', 'interface'):
i = self.__fix_class_comments(i)
i += 1
i += 1
# Fix start comment
def __add_start_comment(self):
if not self.__is_start_comment_exists():
comment_token = Token(None, TokenType.COMMENT)
comment_string = f'/*\n' \
f' * {self.__find_class_name()}\n' \
f' *\n' \
f' * {datetime.date.today().strftime("%B %d, %Y")}\n' \
f' */'
update_token_value(self.__file, comment_token, comment_string)
self.__tokens.insert(0, comment_token)
self.__tokens.insert(1, Token('\n', TokenType.WHITESPACE))
self.__tokens.insert(1, Token('\n', TokenType.WHITESPACE))
def __is_start_comment_exists(self):
i = self.__skip_ws_tokens(0)
return self.__tokens[i].get_type() == TokenType.COMMENT
def __find_class_name(self, i=0):
while self.__tokens[i].get_value() not in ('class', 'interface') and self.__tokens[i - 1].get_value() != '.':
i += 1
i = self.__skip_ws_tokens(i + 1)
return self.__tokens[i].get_value()
# Fix class comment
def __fix_class_comments(self, pos):
comment_token = self.__find_doc_comment_before(pos)
if comment_token is None:
comment_token = Token(None, TokenType.COMMENT)
comment_string = f'/**\n' \
f' * Implementation of {self.__find_class_name(pos)}\n' \
f' */'
update_token_value(self.__file, comment_token, comment_string)
insert_pos = self.__find_token_before(pos, '\n')
self.__tokens.insert(insert_pos, Token('\n', TokenType.WHITESPACE))
self.__tokens.insert(insert_pos + 1, comment_token)
else:
self.__fix_comment_links(comment_token)
return self.__fix_class_body_comments(pos)
# Fix comments for methods and fields
def __fix_class_body_comments(self, pos):
while self.__tokens[pos].get_value() != '{':
pos += 1
count = 1
pos += 1
while count != 0:
if self.__tokens[pos].get_value() == '{':
count += 1
elif self.__tokens[pos].get_value() == '}':
count -= 1
elif self.__tokens[pos].get_value() == 'static':
i = self.__skip_ws_tokens(pos + 1)
if self.__tokens[i].get_value() == '{':
pos = i + 1
count += 1
continue
elif self.__tokens[pos].get_type() in (TokenType.IDENTIFIER, TokenType.KEYWORD) and self.__tokens[
pos + 1].get_value() != '.' and self.__tokens[pos].get_value() not in ('class', 'interface'):
if self.__is_parameter(pos):
pos = self.__fix_field_comment(pos)
else:
pos = self.__fix_method_comment(pos)
pos += 1
return pos
def __fix_field_comment(self, pos):
comment_token = self.__find_doc_comment_before(pos)
indent = self.__get_indent(pos)
if comment_token is None:
field_names = ', '.join(self.__get_field_names(pos)[0])
visibility = self.__find_visibility(pos)
comment_token = Token(None, TokenType.COMMENT)
comment_string = f'{indent}/**\n' \
f'{indent} * The {visibility} {field_names} {"constant" if self.__is_final(pos) else "variable"}{"s" if len(field_names) > 0 else ""}\n' \
f'{indent} */'
update_token_value(self.__file, comment_token, comment_string)
insert_pos = self.__find_token_before(pos, '\n')
self.__tokens.insert(insert_pos, Token('\n', TokenType.WHITESPACE))
self.__tokens.insert(insert_pos + 1, comment_token)
else:
self.__fix_comment_links(comment_token)
return self.__find_token_after(pos, ';')
def __find_visibility(self, pos):
pos = self.__find_token_before(pos, '\n')
while self.__tokens[pos].get_value() not in ('=', ';', '('):
if self.__tokens[pos].get_value() in ('private', 'public', 'protected'):
return self.__tokens[pos].get_value()
pos += 1
return 'package-private'
def __fix_method_comment(self, pos):
comment_token = self.__find_doc_comment_before(pos)
indent = self.__get_indent(pos)
all_params = []
if comment_token is None:
params = self.__get_parameter_list(pos)
params.extend(self.__get_type_parameter_list(pos))
if len(params) > 0:
all_params.append("\n".join([f"{indent} * @param {param}" for param in params]))
throws = self.__get_throws(pos)
if len(throws) > 0:
all_params.append("\n".join([f"{indent} * @throws {param}" for param in throws]))
return_type = self.__get_return_type(pos)
if len(return_type) > 0:
all_params.append(f"{indent} * @return {self.__get_return_type(pos)}")
comment_token = Token(None, TokenType.COMMENT)
comment_string = f'{indent}/**\n' + \
'\n'.join(all_params) + \
('' if len(params) <= 0 else ' ') + \
f'\n{indent} */'
update_token_value(self.__file, comment_token, comment_string)
insert_pos = self.__find_token_before(pos, '\n')
self.__tokens.insert(insert_pos, Token('\n', TokenType.WHITESPACE))
self.__tokens.insert(insert_pos + 1, comment_token)
else:
self.__fix_comment_links(comment_token)
params_list = self.__get_parameter_list(pos)
params_list.extend(self.__get_type_parameter_list(pos))
throws_list = self.__get_throws(pos)
return_type_value = self.__get_return_type(pos)
params, throws, return_type = self.__fix_comment_params(comment_token)
comment_string = comment_token.get_value()
append_string = ''
i = 0
if len(params) < len(params_list):
append_string += "\n" + "\n".join(
[f"{indent} * @param {param}" for param in Formatter.get_missing(params, params_list)])
i = comment_string.rfind('@param')
if i != -1:
i = comment_string.find('\n', i) if comment_string.find('\n',
i) != -1 else comment_string.find('*',
i) - 1
comment_string = comment_string[:i] + append_string + comment_string[i:]
append_string = ''
if len(throws) < len(throws_list):
append_string += "\n" + "\n".join(
[f"{indent} * @throws {param}" for param in Formatter.get_missing(throws, throws_list)])
i = comment_string.rfind('@throws')
if i != -1:
i = comment_string.find('\n', i) if comment_string.find('\n',
i) != -1 else comment_string.find('*',
i) - 1
comment_string = comment_string[:i] + append_string + comment_string[i:]
append_string = ''
i = comment_string.find('\n', i)
if return_type == '':
append_string += "\n" + f"\n{indent} * @return {return_type_value}"
else:
i = comment_string.rfind('@return')
while comment_string[i] != '\n':
i -= 1
comment_string = comment_string[:i] + append_string + comment_string[i:]
if comment_string != comment_token.get_value():
update_token_value(self.__file, comment_token, comment_string)
return self.__skip_method(pos)
@staticmethod
def get_missing(before, after):
missing_params = []
for value in after:
if value not in before:
missing_params.append(value)
return missing_params
def __get_parameter_list(self, pos):
parameters = []
while self.__tokens[pos].get_value() != '(':
pos += 1
while self.__tokens[pos].get_value() != ')':
if self.__tokens[pos + 1].get_value() in (')', ','):
i = pos
while self.__tokens[i].get_type() == TokenType.WHITESPACE:
i -= 1
parameters.append(self.__tokens[i].get_value())
pos += 1
return parameters
def __get_type_parameter_list(self, pos):
parameters = []
while self.__tokens[pos].get_value() != '<':
if self.__tokens[pos].get_value() == '(':
return parameters
pos += 1
i = pos - 1
while self.__tokens[i].get_type() == TokenType.WHITESPACE:
i -= 1
if self.__tokens[i].get_type() != TokenType.KEYWORD or self.__tokens[i].get_value() not in ('}', ';'):
return parameters
while self.__tokens[pos].get_value() != '>':
if self.__tokens[pos - 1].get_value() in ('<', ','):
i = pos
while self.__tokens[i].get_type() == TokenType.WHITESPACE:
i += 1
parameters.append(self.__tokens[i].get_value())
pos += 1
return parameters
def __get_throws(self, pos):
throws = []
is_throws = False
while self.__tokens[pos].get_value() not in ('{', ';'):
if self.__tokens[pos].get_value() == 'throws':
is_throws = True
elif is_throws and self.__tokens[pos].get_type() == TokenType.IDENTIFIER:
throws.append(self.__tokens[pos].get_value())
pos += 1
return throws
def __get_return_type(self, pos):
return_type = []
while self.__tokens[pos].get_value() != '(':
pos += 1
pos -= 1
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
while self.__tokens[pos].get_type() != TokenType.WHITESPACE:
pos -= 1
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
if self.__tokens[pos].get_value() == '>':
while self.__tokens[pos].get_value() != '<':
return_type.append(self.__tokens[pos].get_value())
pos -= 1
return_type.append(self.__tokens[pos].get_value())
pos -= 1
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
return_type.append(self.__tokens[pos].get_value())
pos -= 1
return_type.append(self.__tokens[pos].get_value())
return_type.reverse()
return ''.join(return_type)
def __fix_comment_params(self, comment_token):
i = 0
params = []
throws = []
return_type = ''
comment_string = comment_token.get_value()
while i < len(comment_string):
if comment_string[i] == '@':
start = comment_string.find(' ', i)
macro = comment_string[i:start]
end = min(comment_string.find(' ', start + 1), comment_string.find('\n', start + 1))
end = end if end >= 0 else max(comment_string.find(' ', start + 1),
comment_string.find('\n', start + 1))
if end > 0:
value = comment_string[start + 1:end]
new_value = self.__fix_link(value)
if value != new_value:
comment_string = comment_string.replace(value, new_value)
update_token_value(self.__file, comment_token, comment_string)
value = new_value
if macro == '@param':
params.append(value)
elif macro == '@throws':
throws.append(value)
elif macro == '@return':
return_type = value
i += 1
return params, throws, return_type
def __skip_method(self, pos):
while self.__tokens[pos].get_value() != '{':
if self.__tokens[pos].get_value() == ';':
return pos + 1
pos += 1
count = 1
pos += 1
while count != 0:
if self.__tokens[pos].get_value() == '{':
count += 1
elif self.__tokens[pos].get_value() == '}':
count -= 1
pos += 1
return pos
def __find_doc_comment_before(self, pos):
while self.__tokens[pos].get_value() != '\n':
pos -= 1
while pos > 0 and self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos -= 1
if self.__tokens[pos].get_type() == TokenType.COMMENT and self.__tokens[pos].get_value().startswith('/**'):
return self.__tokens[pos]
return None
def __find_token_before(self, pos, value):
while pos > 0 and self.__tokens[pos].get_value() != value:
pos -= 1
return pos
def __find_token_after(self, pos, value):
while pos < len(self.__tokens) and self.__tokens[pos].get_value() != value:
pos += 1
return pos
def __fix_comment_links(self, comment_token):
i = 0
link = None
comment_string = comment_token.get_value()
while i < len(comment_string):
if comment_string[i] == '@':
start = comment_string.find(' ', i)
if comment_string[i:start] != '@see':
i += 1
continue
end = comment_string.find('\n', i)
link = comment_string[start:end]
elif comment_string[i] == '{':
start = comment_string.find(' ', i)
end = comment_string.find('}', i)
link = comment_string[start:end]
if link is not None:
new_link = self.__fix_link(link)
comment_string = comment_string.replace(link, new_link)
link = None
i += 1
if comment_string != comment_token.get_value():
update_token_value(self.__file, comment_token, comment_string)
def __fix_link(self, link):
for name in self.__to_fix.keys():
pos = link.find(name)
if pos != -1 and not (link[pos - 1].isalpha() or link[
pos - 1].isdigit() or link[pos - 1] == '_'):
link = link.replace(name, self.__to_fix[name])
return link
def __get_indent(self, pos):
pos = self.__find_token_before(pos, '\n')
count = 0
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
if self.__tokens[pos].get_value() == ' ':
count += 1
pos += 1
return ' ' * count
def __skip_ws_tokens(self, pos):
while self.__tokens[pos].get_type() == TokenType.WHITESPACE:
pos += 1
return pos
@staticmethod
def is_lower_case(naming):
return naming.find('_') == -1 and naming.islower()
@staticmethod
def to_lower_case(naming):
return ''.join([component.lower() for component in naming.split('_')])
@staticmethod
def is_camel_lower_case(naming):
return naming.find('_') == -1 and not naming.isupper() and not naming[0].isupper()
@staticmethod
def to_camel_lower_case(naming):
naming = Formatter.remove_underscores_around(naming)
components = [
component[0] + component[1:].lower() if component.isupper() else component[0].upper() + component[1:] for
component in naming.split('_')]
return components[0][0].lower() + components[0][1:] + ''.join(components[1:])
@staticmethod
def is_camel_upper_case(naming):
return naming.find('_') == -1 and not naming.isupper() and naming[0].isupper()
@staticmethod
def to_camel_upper_case(naming):
lower = Formatter.to_camel_lower_case(naming)
return lower[0].upper() + lower[1:]
@staticmethod
def is_snake_lower_case(naming):
return naming.islower()
@staticmethod
def to_snake_lower_case(naming):
naming = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', naming)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', naming).lower()
@staticmethod
def is_snake_upper_case(naming):
return naming.isupper()
@staticmethod
def to_snake_upper_case(naming):
return Formatter.to_snake_lower_case(naming).upper()
@staticmethod
def remove_underscores_around(naming):
i = 0
while naming[i] == '_':
i += 1
naming = naming[i:]
j = len(naming) - 1
while naming[j] == '_':
j -= 1
naming = naming[:j + 1]
return naming
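# A hypothetical usage sketch (the file names are placeholders, not part of this module):
#
#     formatter = Formatter(['Example.java', 'Helper.java'])
#     fixed_tokens = formatter.process()  # per-file token lists with identifiers renamed and
#                                         # missing Javadoc comments inserted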
| 42.449761
| 184
| 0.527916
| 3,019
| 26,616
| 4.273601
| 0.04836
| 0.114711
| 0.068517
| 0.078127
| 0.695319
| 0.617966
| 0.575647
| 0.519842
| 0.452721
| 0.413889
| 0
| 0.009561
| 0.347648
| 26,616
| 626
| 185
| 42.517572
| 0.733514
| 0.002668
| 0
| 0.469534
| 0
| 0.001792
| 0.034439
| 0.004635
| 0.003584
| 0
| 0
| 0
| 0
| 1
| 0.082437
| false
| 0
| 0.007168
| 0.012545
| 0.177419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aad63892e2757c199be78dfebc46a66c8e7becf
| 3,783
|
py
|
Python
|
src/secml/adv/attacks/evasion/c_attack_evasion_pgd_exp.py
|
zangobot/secml
|
95a293e1201c24256eb7fe2f1d2125cd5f318c8c
|
[
"Apache-2.0"
] | 63
|
2020-04-20T16:31:16.000Z
|
2022-03-29T01:05:35.000Z
|
src/secml/adv/attacks/evasion/c_attack_evasion_pgd_exp.py
|
zangobot/secml
|
95a293e1201c24256eb7fe2f1d2125cd5f318c8c
|
[
"Apache-2.0"
] | 5
|
2020-04-21T11:31:39.000Z
|
2022-03-24T13:42:56.000Z
|
src/secml/adv/attacks/evasion/c_attack_evasion_pgd_exp.py
|
zangobot/secml
|
95a293e1201c24256eb7fe2f1d2125cd5f318c8c
|
[
"Apache-2.0"
] | 8
|
2020-04-21T09:16:42.000Z
|
2022-02-23T16:28:43.000Z
|
"""
.. module:: CAttackEvasionPGDExp
:synopsis: Evasion attack using Projected Gradient Descent.
.. moduleauthor:: Battista Biggio <[email protected]>
"""
from secml.adv.attacks.evasion import CAttackEvasionPGDLS
class CAttackEvasionPGDExp(CAttackEvasionPGDLS):
"""Evasion attacks using Projected Gradient Descent with Exponential line search.
This class implements the maximum-confidence evasion attacks proposed in:
- https://arxiv.org/abs/1910.00470, EURASIP JIS, 2020.
- https://arxiv.org/abs/1708.06939, ICCV W. ViPAR, 2017.
It is the multi-class extension of our original work in:
- https://arxiv.org/abs/1708.06131, ECML 2013,
implemented using a standard projected gradient solver.
This attack uses a faster line search than PGD-LS.
In all our attacks, we use a smart double initialization to avoid using the
mimicry term from our ECML 2013 paper, as described in:
- https://pralab.diee.unica.it/sites/default/files/zhang15-tcyb.pdf, IEEE TCYB, 2015
If the attack is not successful when starting from x0,
we initialize the optimization by projecting a point from another
class onto the feasible domain and try again.
Parameters
----------
classifier : CClassifier
Target classifier.
double_init_ds : CDataset or None, optional
Dataset used to initialize an alternative init point (double init).
double_init : bool, optional
If True (default), use double initialization point.
Needs double_init_ds not to be None.
distance : {'l1' or 'l2'}, optional
Norm to use for computing the distance of the adversarial example
from the original sample. Default 'l2'.
dmax : scalar, optional
Maximum value of the perturbation. Default 1.
lb, ub : int or CArray, optional
Lower/Upper bounds. If int, the same bound will be applied to all
the features. If CArray, a different bound can be specified for each
feature. Default `lb = 0`, `ub = 1`.
y_target : int or None, optional
If None an error-generic attack will be performed, else an
error-specific attack to have the samples misclassified as
belonging to the `y_target` class.
attack_classes : 'all' or CArray, optional
Array with the classes that can be manipulated by the attacker or
'all' (default) if all classes can be manipulated.
solver_params : dict or None, optional
Parameters for the solver.
Default None, meaning that default parameters will be used.
See :class:`COptimizerPGDExp` for more information.
Attributes
----------
class_type : 'e-pgd-exp'
"""
__class_type = 'e-pgd-exp'
def __init__(self, classifier,
double_init_ds=None,
double_init=True,
distance='l1',
dmax=0,
lb=0,
ub=1,
y_target=None,
attack_classes='all',
solver_params=None):
# INTERNALS
self._x0 = None
self._y0 = None
# this is an alternative init point. This could be a single point
# (targeted evasion) or an array of multiple points, one for each
# class (indiscriminate evasion). See _get_point_with_min_f_obj()
self._xk = None
super(CAttackEvasionPGDExp, self).__init__(
classifier=classifier,
double_init_ds=double_init_ds,
double_init=double_init,
distance=distance,
dmax=dmax,
lb=lb,
ub=ub,
y_target=y_target,
attack_classes=attack_classes,
solver_params=solver_params)
self.solver_type = 'pgd-exp'
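# A hypothetical instantiation sketch (clf and tr_set stand for a trained CClassifier and a
# CDataset; the parameter values are illustrative, not defaults):
#
#     attack = CAttackEvasionPGDExp(classifier=clf,
#                                   double_init_ds=tr_set,
#                                   distance='l2',
#                                   dmax=0.5,
#                                   lb=0, ub=1,
#                                   y_target=None)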
| 36.375
| 88
| 0.649749
| 483
| 3,783
| 4.987578
| 0.414079
| 0.041511
| 0.024907
| 0.019925
| 0.063097
| 0.010793
| 0
| 0
| 0
| 0
| 0
| 0.022694
| 0.277822
| 3,783
| 103
| 89
| 36.728155
| 0.859078
| 0.684642
| 0
| 0
| 0
| 0
| 0.021212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.035714
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aaf87f9c4ecb098ef160db1b17bb991d9edaacc
| 3,172
|
py
|
Python
|
mail_log_parser/data_manager.py
|
kinteriq/mail-log-parser
|
e4242387c1767db611e266d463c817aeb8a74377
|
[
"MIT"
] | null | null | null |
mail_log_parser/data_manager.py
|
kinteriq/mail-log-parser
|
e4242387c1767db611e266d463c817aeb8a74377
|
[
"MIT"
] | null | null | null |
mail_log_parser/data_manager.py
|
kinteriq/mail-log-parser
|
e4242387c1767db611e266d463c817aeb8a74377
|
[
"MIT"
] | null | null | null |
import sqlite3
class ManageData:
def __init__(self, queue_tracker_db, email_tracker_db, delivery_tracker_db):
self.queue_tracker_db = queue_tracker_db
self.email_tracker_db = email_tracker_db
self.delivery_tracker_db = delivery_tracker_db
def manage_queue_tracker(self, fields):
"""
Receive one of the following located groups as <fields>:
[('ID', <id>), ('client_email', <email>)];
[('ID', <id>), ('receivers', <email>), ('status', <status>)];
[('ID', <id>)];
and manage the <queue_tracker_db> accordingly.
"""
if len(fields) == 1:
ID = fields[0][1]
self.manage_email_tracker(ID)
self.manage_delivery_tracker(ID)
del self.queue_tracker_db[ID]
elif len(fields) == 2:
ID, client_email = (f[1] for f in fields)
self.queue_tracker_db[ID]['client_email'] = client_email
elif len(fields) == 3:
ID, receiver, status = (f[1] for f in fields)
if status == 'sent':
code = 1
else:
code = 0
self.queue_tracker_db[ID]['receivers'][receiver] = code
def manage_email_tracker(self, ID):
"""
Retrieve client's email from the <queue_tracker_db> by <ID>
with the amount of 'receivers' whose 'status' == 1
and store it in the <email_tracker_db>.
"""
client_email = self.queue_tracker_db[ID]['client_email']
receivers = self.queue_tracker_db[ID]['receivers']
delivered_mail = [r for r in receivers if receivers[r] == 1]
if client_email in self.email_tracker_db:
self.email_tracker_db[client_email] += len(delivered_mail)
else:
self.email_tracker_db[client_email] = len(delivered_mail)
def manage_delivery_tracker(self, ID):
"""
Go through all receivers of <ID> queue of <queue_tracker_db>,
and add their delivery statuses to the <delivery_tracker_db> counter
"""
receivers = self.queue_tracker_db[ID]['receivers']
for receiver in receivers:
if receivers[receiver] == 1:
self.delivery_tracker_db['delivered'] += 1
else:
self.delivery_tracker_db['undelivered'] += 1
class ManageDatabase(ManageData):
def __init__(self, path, *args, **kwargs):
self.path = path
super().__init__(*args, **kwargs)
def _execute_command(self, *command):
con = sqlite3.connect(self.path)
cursor = con.cursor()
result = cursor.execute(*command)
if result:
result = result.fetchall()
con.commit()
con.close()
return result
def create_db(self):
self._execute_command('''CREATE TABLE IF NOT EXISTS email_tracker
(client_email TEXT PRIMARY KEY, num_of_letters_sent INTEGER)''')
def transfer_data(self):
for email, num_of_letters in self.email_tracker_db.items():
self._execute_command('''INSERT INTO email_tracker VALUES
(?, ?)''', (email, num_of_letters))
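# A minimal usage sketch (an assumption inferred from the accesses above: each entry of
# queue_tracker_db is pre-created elsewhere as {'client_email': None, 'receivers': {}}):
#
#     queue = {'A1': {'client_email': None, 'receivers': {}}}
#     manager = ManageData(queue_tracker_db=queue, email_tracker_db={},
#                          delivery_tracker_db={'delivered': 0, 'undelivered': 0})
#     manager.manage_queue_tracker([('ID', 'A1'), ('client_email', 'client@example')])
#     manager.manage_queue_tracker([('ID', 'A1'), ('receivers', 'rcpt@example'), ('status', 'sent')])
#     manager.manage_queue_tracker([('ID', 'A1')])  # flushes 'A1' into the email/delivery trackers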
| 38.216867
| 80
| 0.596154
| 385
| 3,172
| 4.641558
| 0.246753
| 0.130946
| 0.094012
| 0.080582
| 0.266928
| 0.179631
| 0.127588
| 0.050364
| 0.050364
| 0
| 0
| 0.007124
| 0.291929
| 3,172
| 82
| 81
| 38.682927
| 0.788513
| 0.163304
| 0
| 0.087719
| 0
| 0
| 0.096123
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.140351
| false
| 0
| 0.017544
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ab07fb42baef7b3a437132ef3d9c03a2ec1e478
| 561
|
py
|
Python
|
Util/training_util.py
|
lychenyoko/content-aware-gan-compression
|
fa4193df630dd7b0e7fc52dd60669d8e1aefc39d
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | 47
|
2021-07-04T14:51:38.000Z
|
2022-03-17T07:02:06.000Z
|
Util/training_util.py
|
lychenyoko/content-aware-gan-compression
|
fa4193df630dd7b0e7fc52dd60669d8e1aefc39d
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | 9
|
2021-04-10T08:32:08.000Z
|
2022-02-21T03:14:40.000Z
|
Util/training_util.py
|
lychenyoko/content-aware-gan-compression
|
fa4193df630dd7b0e7fc52dd60669d8e1aefc39d
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | 7
|
2021-07-02T08:11:55.000Z
|
2022-01-12T18:06:40.000Z
|
import math
import torch
from torch import autograd
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
noise = torch.randn_like(fake_img) / math.sqrt(
fake_img.shape[2] * fake_img.shape[3]
)
grad, = autograd.grad(
outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True
)
path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))
path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
path_penalty = (path_lengths - path_mean).pow(2).mean()
return path_penalty, path_mean.detach(), path_lengths
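# A minimal call sketch (shapes and the generator are placeholders; fake_img must be produced
# from latents so that autograd can differentiate through it):
#
#     latents = torch.randn(4, 512, requires_grad=True)
#     fake_img = generator(latents)   # hypothetical generator returning an image batch
#     penalty, new_mean, lengths = g_path_regularize(fake_img, latents, mean_path_length=0.0)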
| 33
| 83
| 0.68984
| 84
| 561
| 4.321429
| 0.428571
| 0.096419
| 0.115702
| 0.104683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019438
| 0.174688
| 561
| 16
| 84
| 35.0625
| 0.764579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ab246f495c8f138c1a41820ece75a23cb6ba83c
| 37,122
|
py
|
Python
|
autoarray/structures/grids/two_d/grid_2d_util.py
|
caoxiaoyue/PyAutoArray
|
e10d3d6a5b8dd031f2ad277486bd539bd5858b2a
|
[
"MIT"
] | null | null | null |
autoarray/structures/grids/two_d/grid_2d_util.py
|
caoxiaoyue/PyAutoArray
|
e10d3d6a5b8dd031f2ad277486bd539bd5858b2a
|
[
"MIT"
] | null | null | null |
autoarray/structures/grids/two_d/grid_2d_util.py
|
caoxiaoyue/PyAutoArray
|
e10d3d6a5b8dd031f2ad277486bd539bd5858b2a
|
[
"MIT"
] | null | null | null |
import numpy as np
from typing import Tuple, Union, Optional
from autoarray.structures.arrays.two_d import array_2d_util
from autoarray.geometry import geometry_util
from autoarray import numba_util
from autoarray.mask import mask_2d_util
@numba_util.jit()
def grid_2d_centre_from(grid_2d_slim: np.ndarray) -> Tuple[float, float]:
"""
Returns the centre of a grid from a 1D grid.
Parameters
----------
grid_2d_slim
The 1D grid of values which are mapped to a 2D array.
Returns
-------
(float, float)
The (y,x) central coordinates of the grid.
"""
centre_y = (np.max(grid_2d_slim[:, 0]) + np.min(grid_2d_slim[:, 0])) / 2.0
centre_x = (np.max(grid_2d_slim[:, 1]) + np.min(grid_2d_slim[:, 1])) / 2.0
return centre_y, centre_x
@numba_util.jit()
def grid_2d_slim_via_mask_from(
mask_2d: np.ndarray,
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into
a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned on an array of shape (total_unmasked_pixels*sub_size**2, 2). y coordinates are
stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked coordinates are therefore
removed and not included in the slimmed grid.
Grid2D are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0.
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
mask_2d
A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated
sub-grid.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
origin : (float, float)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
A slimmed sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
array. The sub grid array has dimensions (total_unmasked_pixels*sub_size**2, 2).
Examples
--------
mask = np.array([[True, False, True],
[False, False, False],
[True, False, True]])
grid_slim = grid_2d_slim_via_mask_from(mask_2d=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0))
"""
total_sub_pixels = mask_2d_util.total_sub_pixels_2d_from(mask_2d, sub_size)
grid_slim = np.zeros(shape=(total_sub_pixels, 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=mask_2d.shape, pixel_scales=pixel_scales, origin=origin
)
sub_index = 0
y_sub_half = pixel_scales[0] / 2
y_sub_step = pixel_scales[0] / (sub_size)
x_sub_half = pixel_scales[1] / 2
x_sub_step = pixel_scales[1] / (sub_size)
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
if not mask_2d[y, x]:
y_scaled = (y - centres_scaled[0]) * pixel_scales[0]
x_scaled = (x - centres_scaled[1]) * pixel_scales[1]
for y1 in range(sub_size):
for x1 in range(sub_size):
grid_slim[sub_index, 0] = -(
y_scaled - y_sub_half + y1 * y_sub_step + (y_sub_step / 2.0)
)
grid_slim[sub_index, 1] = (
x_scaled - x_sub_half + x1 * x_sub_step + (x_sub_step / 2.0)
)
sub_index += 1
return grid_slim
def grid_2d_via_mask_from(
mask_2d: np.ndarray,
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a
finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size).
y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked pixels are
given values (0.0, 0.0).
Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0.
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
mask_2d
A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated
sub-grid.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
origin : (float, float)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).
Examples
--------
mask = np.array([[True, False, True],
[False, False, False],
[True, False, True]])
grid_2d = grid_2d_via_mask_from(mask_2d=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0))
"""
grid_2d_slim = grid_2d_slim_via_mask_from(
mask_2d=mask_2d, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin
)
return grid_2d_native_from(
grid_2d_slim=grid_2d_slim, mask_2d=mask_2d, sub_size=sub_size
)
def grid_2d_slim_via_shape_native_from(
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a
finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its slimmed dimensions with shape (total_pixels**2*sub_size**2, 2). y coordinates are
stored in the 0 index of the second dimension, x coordinates in the 1 index.
Grid2D are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0].
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
shape_native
The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
origin
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
array. The sub grid is slimmed and has dimensions (total_unmasked_pixels*sub_size**2, 2).
Examples
--------
mask = np.array([[True, False, True],
[False, False, False],
[True, False, True]])
grid_2d_slim = grid_2d_slim_via_shape_native_from(shape_native=(3,3), pixel_scales=(0.5, 0.5), sub_size=2, origin=(0.0, 0.0))
"""
return grid_2d_slim_via_mask_from(
mask_2d=np.full(fill_value=False, shape=shape_native),
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
def grid_2d_via_shape_native_from(
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided
into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes
the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size).
y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index.
Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0].
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
shape_native
The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
origin : (float, float)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).
Examples
--------
grid_2d = grid_2d_via_shape_native_from(shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=2, origin=(0.0, 0.0))
"""
return grid_2d_via_mask_from(
mask_2d=np.full(fill_value=False, shape=shape_native),
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
@numba_util.jit()
def grid_scaled_2d_slim_radial_projected_from(
extent: np.ndarray,
centre: Tuple[float, float],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
shape_slim: Optional[int] = 0,
) -> np.ndarray:
"""
Determine a projected radial grid of points from a 2D region of coordinates defined by an
extent [xmin, xmax, ymin, ymax] and with a (y,x) centre. This functions operates as follows:
1) Given the region defined by the extent [xmin, xmax, ymin, ymax], the algorithm finds the longest 1D distance of
the 4 paths from the (y,x) centre to the edge of the region (e.g. following the positive / negative y and x axes).
2) Use the pixel-scale corresponding to the direction chosen (e.g. if the positive x-axis was the longest, the
pixel_scale in the x dimension is used).
3) Determine the number of pixels between the centre and the edge of the region using the longest path between the
two chosen above.
4) Create a (y,x) grid of radial points where all points are at the centre's y value = 0.0 and the x values iterate
from the centre in increasing steps of the pixel-scale.
5) Rotate these radial coordinates by the input `angle` clockwise.
A schematic is shown below:
-------------------
| |
|<- - - - ->x | x = centre
| | <-> = longest radial path from centre to extent edge
| |
-------------------
Using the centre x above, this function finds the longest radial path to the edge of the extent window.
The returned `grid_radii` represents a radial set of points that in 1D sample the 2D grid outwards from its centre.
This grid stores the radial coordinates as (y,x) values (where all y values are the same) as opposed to a 1D data
structure so that it can be used in functions which require that a 2D grid structure is input.
Parameters
----------
extent
The extent of the grid the radii grid is computed using, with format [xmin, xmax, ymin, ymax]
centre : (float, float)
The (y,x) central coordinate which the radial grid is traced outwards from.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
shape_slim
Manually choose the shape of the 1D projected grid that is returned. If 0, the border based on the 2D grid is
used (due to numba None cannot be used as a default value).
Returns
-------
ndarray
A radial set of points sampling the longest distance from the centre to the edge of the extent in along the
positive x-axis.
"""
distance_to_positive_x = extent[1] - centre[1]
distance_to_positive_y = extent[3] - centre[0]
distance_to_negative_x = centre[1] - extent[0]
distance_to_negative_y = centre[0] - extent[2]
scaled_distance = max(
[
distance_to_positive_x,
distance_to_positive_y,
distance_to_negative_x,
distance_to_negative_y,
]
)
if (scaled_distance == distance_to_positive_y) or (
scaled_distance == distance_to_negative_y
):
pixel_scale = pixel_scales[0]
else:
pixel_scale = pixel_scales[1]
if shape_slim == 0:
shape_slim = sub_size * int((scaled_distance / pixel_scale)) + 1
grid_scaled_2d_slim_radii = np.zeros((shape_slim, 2))
grid_scaled_2d_slim_radii[:, 0] += centre[0]
radii = centre[1]
for slim_index in range(shape_slim):
grid_scaled_2d_slim_radii[slim_index, 1] = radii
radii += pixel_scale / sub_size
return grid_scaled_2d_slim_radii
@numba_util.jit()
def grid_pixels_2d_slim_from(
grid_scaled_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2d (y,x) scaled coordinates to a slimmed grid of 2d (y,x) pixel coordinate values. Pixel
coordinates are returned as floats such that they include the decimal offset from each pixel's top-left corner
relative to the input scaled coordinate.
The input and output grids are both slimmed and therefore shape (total_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.
The scaled grid is defined by an origin and coordinates are shifted to this origin before computing their
1D grid pixel coordinate values.
Parameters
----------
grid_scaled_2d_slim: np.ndarray
The slimmed grid of 2D (y,x) coordinates in scaled units which are converted to pixel value coordinates.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
origin : (float, float)
The (y,x) origin of the grid, which the scaled grid is shifted to.
Returns
-------
ndarray
A slimmed grid of 2D (y,x) pixel-value coordinates with dimensions (total_pixels, 2).
Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for slim_index in range(grid_scaled_2d_slim.shape[0]):
grid_pixels_2d_slim[slim_index, 0] = (
(-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0])
+ centres_scaled[0]
+ 0.5
)
grid_pixels_2d_slim[slim_index, 1] = (
(grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1])
+ centres_scaled[1]
+ 0.5
)
return grid_pixels_2d_slim
@numba_util.jit()
def grid_pixel_centres_2d_slim_from(
grid_scaled_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of 2D (y,x) pixel values. Pixel coordinates
are returned as integers such that they map directly to the pixel they are contained within.
The input and output grids are both slimmed and therefore shape (total_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this
origin before computing their 1D grid pixel indexes.
Parameters
----------
grid_scaled_2d_slim: np.ndarray
The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
origin : (float, float)
The (y,x) origin of the grid, which the scaled grid is shifted
Returns
-------
ndarray
A slimmed grid of 2D (y,x) pixel indexes with dimensions (total_pixels, 2).
Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for slim_index in range(grid_scaled_2d_slim.shape[0]):
grid_pixels_2d_slim[slim_index, 0] = int(
(-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0])
+ centres_scaled[0]
+ 0.5
)
grid_pixels_2d_slim[slim_index, 1] = int(
(grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1])
+ centres_scaled[1]
+ 0.5
)
return grid_pixels_2d_slim
@numba_util.jit()
def grid_pixel_indexes_2d_slim_from(
grid_scaled_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of pixel indexes. Pixel coordinates are
returned as integers such that they are the pixel from the top-left of the 2D grid going rights and then downwards.
The input and output grids are both slimmed and have shapes (total_pixels, 2) and (total_pixels,).
For example:
The pixel at the top-left, whose native index is [0,0], corresponds to slimmed pixel index 0.
The fifth pixel on the top row, whose native index is [0,4], corresponds to slimmed pixel index 4.
The first pixel on the second row, whose native index is [1,0], has slimmed pixel index 10 if a row has 10 pixels.
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this
origin before computing their 1D grid pixel indexes.
The input and output grids are both of shape (total_pixels, 2).
Parameters
----------
grid_scaled_2d_slim: np.ndarray
The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to slimmed pixel indexes.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
origin : (float, float)
The (y,x) origin of the grid, which the scaled grid is shifted.
Returns
-------
ndarray
A grid of slimmed pixel indexes with dimensions (total_pixels,).
Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
grid_pixel_indexes_2d_slim = grid_pixel_indexes_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d_slim = grid_pixel_centres_2d_slim_from(
grid_scaled_2d_slim=grid_scaled_2d_slim,
shape_native=shape_native,
pixel_scales=pixel_scales,
origin=origin,
)
grid_pixel_indexes_2d_slim = np.zeros(grid_pixels_2d_slim.shape[0])
for slim_index in range(grid_pixels_2d_slim.shape[0]):
grid_pixel_indexes_2d_slim[slim_index] = int(
grid_pixels_2d_slim[slim_index, 0] * shape_native[1]
+ grid_pixels_2d_slim[slim_index, 1]
)
return grid_pixel_indexes_2d_slim
@numba_util.jit()
def grid_scaled_2d_slim_from(
grid_pixels_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2D (y,x) pixel coordinates to a slimmed grid of 2D (y,x) scaled values.
The input and output grids are both slimmed and therefore shape (total_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.
The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this
origin after computing their values from the 1D grid pixel indexes.
Parameters
----------
grid_pixels_2d_slim: np.ndarray
The slimmed grid of (y,x) coordinates in pixel values which is converted to scaled coordinates.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
origin : (float, float)
The (y,x) origin of the grid, which the scaled grid is shifted.
Returns
-------
ndarray
A slimmed grid of 2d scaled coordinates with dimensions (total_pixels, 2).
Examples
--------
grid_pixels_2d_slim = np.array([[0,0], [0,1], [1,0], [1,1]])
grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_pixels_2d_slim=grid_pixels_2d_slim, shape=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_scaled_2d_slim = np.zeros((grid_pixels_2d_slim.shape[0], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for slim_index in range(grid_scaled_2d_slim.shape[0]):
grid_scaled_2d_slim[slim_index, 0] = (
-(grid_pixels_2d_slim[slim_index, 0] - centres_scaled[0] - 0.5)
* pixel_scales[0]
)
grid_scaled_2d_slim[slim_index, 1] = (
grid_pixels_2d_slim[slim_index, 1] - centres_scaled[1] - 0.5
) * pixel_scales[1]
return grid_scaled_2d_slim
@numba_util.jit()
def grid_pixel_centres_2d_from(
grid_scaled_2d: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a native grid of 2D (y,x) scaled coordinates to a native grid of 2D (y,x) pixel values. Pixel coordinates
are returned as integers such that they map directly to the pixel they are contained within.
The input and output grids are both native resolution and therefore have shape (y_pixels, x_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this
origin before computing their 1D grid pixel indexes.
Parameters
----------
grid_scaled_2d: np.ndarray
The native grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
origin : (float, float)
The (y,x) origin of the grid, which the scaled grid is shifted
Returns
-------
ndarray
A native grid of 2D (y,x) pixel indexes with dimensions (y_pixels, x_pixels, 2).
Examples
--------
grid_scaled_2d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
grid_pixel_centres_2d = grid_pixel_centres_2d_from(grid_scaled_2d=grid_scaled_2d, shape=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d = np.zeros((grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for y in range(grid_scaled_2d.shape[0]):
for x in range(grid_scaled_2d.shape[1]):
grid_pixels_2d[y, x, 0] = int(
(-grid_scaled_2d[y, x, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5
)
grid_pixels_2d[y, x, 1] = int(
(grid_scaled_2d[y, x, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5
)
return grid_pixels_2d
@numba_util.jit()
def relocated_grid_via_jit_from(grid, border_grid):
"""
Relocate the coordinates of a grid to its border if they are outside the border, where the border is
defined as all pixels at the edge of the grid's mask (see *mask._border_1d_indexes*).
This is performed as follows:
1: Use the mean value of the grid's y and x coordinates to determine the origin of the grid.
2: Compute the radial distance of every grid coordinate from the origin.
3: For every coordinate, find its nearest pixel in the border.
4: Determine if it is outside the border, by comparing its radial distance from the origin to its paired
border pixel's radial distance.
5: If its radial distance is larger, use the ratio of radial distances to move the coordinate to the
    border (if it is inside the border, do nothing).
The method can be used on uniform or irregular grids, however for irregular grids the border of the
'image-plane' mask is used to define border pixels.
Parameters
----------
grid : Grid2D
The grid (uniform or irregular) whose pixels are to be relocated to the border edge if outside it.
border_grid : Grid2D
The grid of border (y,x) coordinates.
"""
grid_relocated = np.zeros(grid.shape)
grid_relocated[:, :] = grid[:, :]
border_origin = np.zeros(2)
border_origin[0] = np.mean(border_grid[:, 0])
border_origin[1] = np.mean(border_grid[:, 1])
border_grid_radii = np.sqrt(
np.add(
np.square(np.subtract(border_grid[:, 0], border_origin[0])),
np.square(np.subtract(border_grid[:, 1], border_origin[1])),
)
)
border_min_radii = np.min(border_grid_radii)
grid_radii = np.sqrt(
np.add(
np.square(np.subtract(grid[:, 0], border_origin[0])),
np.square(np.subtract(grid[:, 1], border_origin[1])),
)
)
for pixel_index in range(grid.shape[0]):
if grid_radii[pixel_index] > border_min_radii:
closest_pixel_index = np.argmin(
np.square(grid[pixel_index, 0] - border_grid[:, 0])
+ np.square(grid[pixel_index, 1] - border_grid[:, 1])
)
move_factor = (
border_grid_radii[closest_pixel_index] / grid_radii[pixel_index]
)
if move_factor < 1.0:
grid_relocated[pixel_index, :] = (
move_factor * (grid[pixel_index, :] - border_origin[:])
+ border_origin[:]
)
return grid_relocated
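# Hedged usage sketch (added for illustration, not part of the original module): demonstrates
# the border relocation above with a circular border of radius 1.0 centred on the origin.
# A coordinate at radius 2.0 is pulled back onto the border, while a coordinate inside the
# border is left unchanged.
def _example_relocated_grid():
    border_grid = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]])
    grid = np.array([[2.0, 0.0], [0.3, 0.3]])
    # Expected result: [[1.0, 0.0], [0.3, 0.3]]
    return relocated_grid_via_jit_from(grid=grid, border_grid=border_grid)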
@numba_util.jit()
def furthest_grid_2d_slim_index_from(
grid_2d_slim: np.ndarray, slim_indexes: np.ndarray, coordinate: Tuple[float, float]
) -> int:
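    """
    Returns the index, from the input `slim_indexes`, of the coordinate on `grid_2d_slim`
    that is furthest (by squared distance) from the input (y,x) `coordinate`.
    """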
distance_to_centre = 0.0
for slim_index in slim_indexes:
y = grid_2d_slim[slim_index, 0]
x = grid_2d_slim[slim_index, 1]
distance_to_centre_new = (x - coordinate[1]) ** 2 + (y - coordinate[0]) ** 2
if distance_to_centre_new >= distance_to_centre:
distance_to_centre = distance_to_centre_new
furthest_grid_2d_slim_index = slim_index
return furthest_grid_2d_slim_index
def grid_2d_slim_from(
grid_2d_native: np.ndarray, mask: np.ndarray, sub_size: int
) -> np.ndarray:
"""
For a native 2D grid and mask of shape [total_y_pixels, total_x_pixels, 2], map the values of all unmasked
pixels to a slimmed grid of shape [total_unmasked_pixels, 2].
The pixel coordinate origin is at the top left corner of the native grid and goes right-wards and downwards, such
    that for a grid of shape (3,3) where all pixels are unmasked:
- pixel [0,0] of the 2D grid will correspond to index 0 of the 1D grid.
- pixel [0,1] of the 2D grid will correspond to index 1 of the 1D grid.
- pixel [1,0] of the 2D grid will correspond to index 4 of the 1D grid.
Parameters
----------
grid_2d_native : ndarray
The native grid of (y,x) values which are mapped to the slimmed grid.
    mask
A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
sub_size
The size (sub_size x sub_size) of each unmasked pixels sub-array.
Returns
-------
ndarray
        A slimmed grid of (y,x) values mapped from the native grid, with dimensions (total_unmasked_pixels, 2).
"""
grid_1d_slim_y = array_2d_util.array_2d_slim_from(
array_2d_native=grid_2d_native[:, :, 0], mask_2d=mask, sub_size=sub_size
)
grid_1d_slim_x = array_2d_util.array_2d_slim_from(
array_2d_native=grid_2d_native[:, :, 1], mask_2d=mask, sub_size=sub_size
)
return np.stack((grid_1d_slim_y, grid_1d_slim_x), axis=-1)
def grid_2d_native_from(
grid_2d_slim: np.ndarray, mask_2d: np.ndarray, sub_size: int
) -> np.ndarray:
"""
For a slimmed 2D grid of shape [total_unmasked_pixels, 2], that was computed by extracting the unmasked values
from a native 2D grid of shape [total_y_pixels, total_x_pixels, 2], map the slimmed grid's coordinates back to the
native 2D grid where masked values are set to zero.
This uses a 1D array 'slim_to_native' where each index gives the 2D pixel indexes of the grid's native unmasked
pixels, for example:
- If slim_to_native[0] = [0,0], the first value of the 1D array maps to the pixels [0,0,:] of the native 2D grid.
- If slim_to_native[1] = [0,1], the second value of the 1D array maps to the pixels [0,1,:] of the native 2D grid.
- If slim_to_native[4] = [1,1], the fifth value of the 1D array maps to the pixels [1,1,:] of the native 2D grid.
Parameters
----------
grid_2d_slim
The (y,x) values of the slimmed 2D grid which are mapped to the native 2D grid.
mask_2d
A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
sub_size
The size (sub_size x sub_size) of each unmasked pixels sub-array.
Returns
-------
ndarray
A NumPy array of shape [total_y_pixels, total_x_pixels, 2] corresponding to the (y,x) values of the native 2D
mapped from the slimmed grid.
"""
grid_2d_native_y = array_2d_util.array_2d_native_from(
array_2d_slim=grid_2d_slim[:, 0], mask_2d=mask_2d, sub_size=sub_size
)
grid_2d_native_x = array_2d_util.array_2d_native_from(
array_2d_slim=grid_2d_slim[:, 1], mask_2d=mask_2d, sub_size=sub_size
)
return np.stack((grid_2d_native_y, grid_2d_native_x), axis=-1)
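# Hedged usage sketch (added for illustration, not part of the original module): maps a
# slimmed grid of 4 coordinates back onto a fully unmasked 2x2 native grid with sub_size=1,
# assuming `array_2d_util.array_2d_native_from` fills unmasked pixels in row-major order.
def _example_grid_2d_native():
    grid_2d_slim = np.array([[1.0, -1.0], [1.0, 1.0], [-1.0, -1.0], [-1.0, 1.0]])
    mask_2d = np.full(shape=(2, 2), fill_value=False)
    # Expected result: [[[1, -1], [1, 1]], [[-1, -1], [-1, 1]]]
    return grid_2d_native_from(grid_2d_slim=grid_2d_slim, mask_2d=mask_2d, sub_size=1)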
@numba_util.jit()
def grid_2d_slim_upscaled_from(
grid_slim: np.ndarray,
upscale_factor: int,
pixel_scales: Union[float, Tuple[float, float]],
) -> np.ndarray:
"""
From an input slimmed 2D grid, return an upscaled slimmed 2D grid where (y,x) coordinates are added at an
upscaled resolution to each grid coordinate, analogous to a sub-grid.
Parameters
----------
grid_slim
The slimmed grid of (y,x) coordinates over which a square uniform grid is overlaid.
upscale_factor
The upscaled resolution at which the new grid coordinates are computed.
pixel_scales
        The pixel scale of the uniform grid that is laid over the irregular grid of (y,x) coordinates.
"""
grid_2d_slim_upscaled = np.zeros(
shape=(grid_slim.shape[0] * upscale_factor ** 2, 2)
)
upscale_index = 0
y_upscale_half = pixel_scales[0] / 2
y_upscale_step = pixel_scales[0] / upscale_factor
x_upscale_half = pixel_scales[1] / 2
x_upscale_step = pixel_scales[1] / upscale_factor
for slim_index in range(grid_slim.shape[0]):
y_grid = grid_slim[slim_index, 0]
x_grid = grid_slim[slim_index, 1]
for y in range(upscale_factor):
for x in range(upscale_factor):
grid_2d_slim_upscaled[upscale_index, 0] = (
y_grid
+ y_upscale_half
- y * y_upscale_step
- (y_upscale_step / 2.0)
)
grid_2d_slim_upscaled[upscale_index, 1] = (
x_grid
- x_upscale_half
+ x * x_upscale_step
+ (x_upscale_step / 2.0)
)
upscale_index += 1
return grid_2d_slim_upscaled
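# Hedged usage sketch (added for illustration, not part of the original module): upscaling
# a single coordinate at (0.0, 0.0) by a factor of 2 with 1.0" pixels places 4 sub-coordinates
# at the centres of its four quadrants.
def _example_grid_2d_slim_upscaled():
    grid_slim = np.array([[0.0, 0.0]])
    # Expected result: [[0.25, -0.25], [0.25, 0.25], [-0.25, -0.25], [-0.25, 0.25]]
    return grid_2d_slim_upscaled_from(
        grid_slim=grid_slim, upscale_factor=2, pixel_scales=(1.0, 1.0)
    )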
def grid_2d_of_points_within_radius(
radius: float, centre: Tuple[float, float], grid_2d: np.ndarray
):
y_inside = []
x_inside = []
for i in range(len(grid_2d[:, 0])):
        # Keep only points whose squared distance from the centre is within the radius.
        if (grid_2d[i, 0] - centre[0]) ** 2 + (
            grid_2d[i, 1] - centre[1]
        ) ** 2 < radius ** 2:
            y_inside.append(grid_2d[i, 0])
            x_inside.append(grid_2d[i, 1])
    return np.stack((y_inside, x_inside), axis=-1)
def compute_polygon_area(points):
x = points[:, 1]
y = points[:, 0]
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
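# Hedged usage sketch (added for illustration, not part of the original module):
# compute_polygon_area implements the shoelace formula on (y,x) vertices, so a unit square
# traversed in order has an area of 1.0.
def _example_compute_polygon_area():
    points = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]])
    # Expected result: 1.0
    return compute_polygon_area(points=points)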
| 39.787781
| 130
| 0.645574
| 5,678
| 37,122
| 4.032582
| 0.056886
| 0.028301
| 0.026204
| 0.027252
| 0.706075
| 0.659038
| 0.618946
| 0.601869
| 0.576713
| 0.550684
| 0
| 0.029365
| 0.267954
| 37,122
| 932
| 131
| 39.830472
| 0.813211
| 0.563116
| 0
| 0.305638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053412
| false
| 0
| 0.017804
| 0
| 0.124629
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ab2e129e7612f7fdafee8257f8411edf7808187
| 2,689
|
py
|
Python
|
Proxies/Proxies.py
|
crown-prince/proxies
|
a3342d414675dbc89cdf1b953b46ea518f451166
|
[
"MIT"
] | 2
|
2018-08-28T06:34:16.000Z
|
2018-12-05T01:33:33.000Z
|
Proxies/Proxies.py
|
crown-prince/proxies
|
a3342d414675dbc89cdf1b953b46ea518f451166
|
[
"MIT"
] | null | null | null |
Proxies/Proxies.py
|
crown-prince/proxies
|
a3342d414675dbc89cdf1b953b46ea518f451166
|
[
"MIT"
] | 3
|
2017-11-23T03:16:49.000Z
|
2019-05-05T05:23:57.000Z
|
# coding: utf-8
import requests, math
import gevent
from gevent.queue import Queue
from gevent import monkey; monkey.patch_all()
from pyquery import PyQuery
class Proxies():
def __init__(self):
self.domestic_gn_url = 'http://www.kuaidaili.com/free/inha/{0}/'
self.domestic_pt_url = 'http://www.kuaidaili.com/free/intr/{0}/'
self.abroad_gn_url = 'http://www.kuaidaili.com/free/outha/{0}/'
self.abroad_pt_url = 'http://www.kuaidaili.com/free/outtr/{0}/'
self.result_arr = []
self.s = requests.Session()
self.headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
'Referer': 'http://www.kuaidaili.com/'
}
def fetch_urls(self, queue, quantity):
while not queue.empty():
url = queue.get()
html = self.s.get(url, headers=self.headers).text
pq = PyQuery(html)
size = pq.find('tbody tr').size()
for index in range(size):
item = pq.find('tbody tr').eq(index)
ip = item.find('td').eq(0).text()
port = item.find('td').eq(1).text()
_type = item.find('td').eq(3).text()
self.result_arr.append({
str(_type).lower(): '{0}://{1}:{2}'.format(str(_type).lower(), ip, port)
})
if len(self.result_arr) >= quantity:
break
def get_proxies(self, quantity, type):
        '''
        quantity: number of proxies to fetch
        type: proxy category
            1. domestic high-anonymity proxies
            2. domestic ordinary proxies
            3. foreign high-anonymity proxies
            4. foreign ordinary proxies
        '''
url_queue = Queue()
need_pages = int(math.ceil(quantity/15))
        # Determine the proxy category
        if type == 1:
            # domestic high-anonymity proxies
            base_url = self.domestic_gn_url
        elif type == 2:
            # domestic ordinary proxies
            base_url = self.domestic_pt_url
        elif type == 3:
            # foreign high-anonymity proxies
            base_url = self.abroad_gn_url
        elif type == 4:
            # foreign ordinary proxies
            base_url = self.abroad_pt_url
        # Build the URLs of the pages that need to be fetched
        for index in range(need_pages):
            url = base_url.format(index+1)
            url_queue.put(url)
        # Process all URLs, spawning 2 coroutines
gevent_list = []
for index in range(2):
gevent_list.append(
gevent.spawn(self.fetch_urls, url_queue, quantity)
)
gevent.joinall(gevent_list)
def get_result(self):
return self.result_arr
if __name__ == '__main__':
p = Proxies()
p.get_proxies(20, 1)
result = p.get_result()
print(result)
| 32.39759
| 150
| 0.531424
| 335
| 2,689
| 4.095522
| 0.367164
| 0.02551
| 0.058309
| 0.069242
| 0.081633
| 0.081633
| 0.081633
| 0
| 0
| 0
| 0
| 0.030354
| 0.338416
| 2,689
| 82
| 151
| 32.792683
| 0.740866
| 0.055039
| 0
| 0
| 0
| 0.016667
| 0.147368
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.083333
| 0.016667
| 0.183333
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ab39451258fbf9b0748574dd450aedbe38e6382
| 21,092
|
py
|
Python
|
parallelformers/policies/base/auto.py
|
Oaklight/parallelformers
|
57fc36f81734c29aaf814e092ce13681d3c28ede
|
[
"Apache-2.0"
] | 454
|
2021-07-18T02:51:23.000Z
|
2022-03-31T04:00:53.000Z
|
parallelformers/policies/base/auto.py
|
Oaklight/parallelformers
|
57fc36f81734c29aaf814e092ce13681d3c28ede
|
[
"Apache-2.0"
] | 16
|
2021-07-18T10:47:21.000Z
|
2022-03-22T18:49:57.000Z
|
parallelformers/policies/base/auto.py
|
Oaklight/parallelformers
|
57fc36f81734c29aaf814e092ce13681d3c28ede
|
[
"Apache-2.0"
] | 33
|
2021-07-18T04:48:28.000Z
|
2022-03-14T22:16:36.000Z
|
# Copyright 2021 TUNiB inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import suppress
from typing import List, Union
from torch import nn
from parallelformers.policies.base import Policy
class AutoPolicy:
"""Class for finds automatically appropriate policies for the current model"""
def __init__(self):
self.builtin_policies = {}
with suppress(Exception):
from transformers.models.gpt_neo.modeling_gpt_neo import (
GPTNeoPreTrainedModel,
)
from parallelformers.policies.gpt_neo import GPTNeoPolicy
self.builtin_policies[GPTNeoPreTrainedModel] = [
GPTNeoPolicy,
]
with suppress(Exception):
from transformers.models.bert.modeling_bert import (
BertPreTrainedModel,
)
from parallelformers.policies.bert import BertPolicy
self.builtin_policies[BertPreTrainedModel] = [
BertPolicy,
]
with suppress(Exception):
from transformers.models.bart.modeling_bart import (
BartPretrainedModel,
)
from parallelformers.policies.bart import (
BartDecoderPolicy,
BartEncoderPolicy,
)
self.builtin_policies[BartPretrainedModel] = [
BartEncoderPolicy,
BartDecoderPolicy,
]
with suppress(Exception):
from transformers.models.blenderbot.modeling_blenderbot import (
BlenderbotPreTrainedModel,
)
from parallelformers.policies.blenderbot import (
BlenderbotDecoderPolicy,
BlenderbotEncoderPolicy,
)
self.builtin_policies[BlenderbotPreTrainedModel] = [
BlenderbotEncoderPolicy,
BlenderbotDecoderPolicy,
]
with suppress(Exception):
from transformers.models.deberta.modeling_deberta import (
DebertaPreTrainedModel,
)
from parallelformers.policies.deberta import DebertaPolicy
self.builtin_policies[DebertaPreTrainedModel] = [
DebertaPolicy,
]
with suppress(Exception):
from transformers.models.transfo_xl.modeling_transfo_xl import (
TransfoXLPreTrainedModel,
)
from parallelformers.policies.transfo_xl import TransfoXLPolicy
self.builtin_policies[TransfoXLPreTrainedModel] = [
TransfoXLPolicy,
]
with suppress(Exception):
from transformers.models.roberta.modeling_roberta import (
RobertaPreTrainedModel,
)
from parallelformers.policies.roberta import RobertaPolicy
self.builtin_policies[RobertaPreTrainedModel] = [
RobertaPolicy,
]
with suppress(Exception):
from transformers.models.albert.modeling_albert import (
AlbertPreTrainedModel,
)
from parallelformers.policies.albert import AlbertPolicy
self.builtin_policies[AlbertPreTrainedModel] = [
AlbertPolicy,
]
with suppress(Exception):
from transformers.models.gpt2.modeling_gpt2 import (
GPT2PreTrainedModel,
)
from parallelformers.policies.gpt2 import GPT2Policy
self.builtin_policies[GPT2PreTrainedModel] = [
GPT2Policy,
]
with suppress(Exception):
from transformers.models.ctrl.modeling_ctrl import (
CTRLPreTrainedModel,
)
from parallelformers.policies.ctrl import CTRLPolicy
self.builtin_policies[CTRLPreTrainedModel] = [
CTRLPolicy,
]
with suppress(Exception):
from transformers.models.deberta_v2.modeling_deberta_v2 import (
DebertaV2PreTrainedModel,
)
from parallelformers.policies.deberta_v2 import DebertaV2Policy
self.builtin_policies[DebertaV2PreTrainedModel] = [
DebertaV2Policy,
]
with suppress(Exception):
from transformers.models.openai.modeling_openai import (
OpenAIGPTPreTrainedModel,
)
from parallelformers.policies.openai import OpenAIGPTPolicy
self.builtin_policies[OpenAIGPTPreTrainedModel] = [
OpenAIGPTPolicy,
]
with suppress(Exception):
from transformers.models.electra.modeling_electra import (
ElectraPreTrainedModel,
)
from parallelformers.policies.electra import ElectraPolicy
self.builtin_policies[ElectraPreTrainedModel] = [
ElectraPolicy,
]
with suppress(Exception):
from transformers.models.blenderbot_small.modeling_blenderbot_small import (
BlenderbotSmallPreTrainedModel,
)
from parallelformers.policies.blenderbot_small import (
BlenderbotSmallDecoderPolicy,
BlenderbotSmallEncoderPolicy,
)
self.builtin_policies[BlenderbotSmallPreTrainedModel] = [
BlenderbotSmallEncoderPolicy,
BlenderbotSmallDecoderPolicy,
]
with suppress(Exception):
from transformers.models.distilbert.modeling_distilbert import (
DistilBertPreTrainedModel,
)
from parallelformers.policies.distil_bert import DistilBertPolicy
self.builtin_policies[DistilBertPreTrainedModel] = [
DistilBertPolicy,
]
with suppress(Exception):
from transformers.models.convbert.modeling_convbert import (
ConvBertPreTrainedModel,
)
from parallelformers.policies.convbert import ConvBertPolicy
self.builtin_policies[ConvBertPreTrainedModel] = [
ConvBertPolicy,
]
with suppress(Exception):
from transformers.models.bert_generation.modeling_bert_generation import (
BertGenerationPreTrainedModel,
)
from parallelformers.policies.bert import BertPolicy
self.builtin_policies[BertGenerationPreTrainedModel] = [
BertPolicy,
]
with suppress(Exception):
from transformers.models.big_bird.modeling_big_bird import (
BigBirdPreTrainedModel,
)
from parallelformers.policies.bigbird import BigBirdPolicy
self.builtin_policies[BigBirdPreTrainedModel] = [
BigBirdPolicy,
]
with suppress(Exception):
from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import (
BigBirdPegasusPreTrainedModel,
)
from parallelformers.policies.bigbird_pegasus import (
BigBirdPegasusDecoderPolicy,
BigBirdPegasusEncoderPolicy,
)
self.builtin_policies[BigBirdPegasusPreTrainedModel] = [
BigBirdPegasusEncoderPolicy,
BigBirdPegasusDecoderPolicy,
]
with suppress(Exception):
from transformers.models.vit.modeling_vit import ViTPreTrainedModel
from parallelformers.policies.vit import ViTPolicy
self.builtin_policies[ViTPreTrainedModel] = [
ViTPolicy,
]
with suppress(Exception):
from transformers.models.deit.modeling_deit import (
DeiTPreTrainedModel,
)
from parallelformers.policies.deit import DeiTPolicy
self.builtin_policies[DeiTPreTrainedModel] = [DeiTPolicy]
with suppress(Exception):
from transformers.models.mbart.modeling_mbart import (
MBartPreTrainedModel,
)
from parallelformers.policies.mbart import (
MBartDecoderPolicy,
MBartEncoderPolicy,
)
self.builtin_policies[MBartPreTrainedModel] = [
MBartEncoderPolicy,
MBartDecoderPolicy,
]
with suppress(Exception):
from transformers.models.t5.modeling_t5 import T5PreTrainedModel
from parallelformers.policies.t5 import T5Policy
self.builtin_policies[T5PreTrainedModel] = [
T5Policy,
]
with suppress(Exception):
from transformers.models.pegasus.modeling_pegasus import (
PegasusPreTrainedModel,
)
from parallelformers.policies.pegasus import (
PegasusDecoderPolicy,
PegasusEncoderPolicy,
)
self.builtin_policies[PegasusPreTrainedModel] = [
PegasusEncoderPolicy,
PegasusDecoderPolicy,
]
with suppress(Exception):
from transformers.models.fsmt.modeling_fsmt import (
PretrainedFSMTModel,
)
from parallelformers.policies.fsmt import (
FSMTDecoderPolicy,
FSMTEncoderPolicy,
)
self.builtin_policies[PretrainedFSMTModel] = [
FSMTEncoderPolicy,
FSMTDecoderPolicy,
]
with suppress(Exception):
from transformers.models.xlm.modeling_xlm import XLMPreTrainedModel
from parallelformers.policies.xlm import (
XLMAttentionPolicy,
XLMMLPPolicy,
)
self.builtin_policies[XLMPreTrainedModel] = [
XLMAttentionPolicy,
XLMMLPPolicy,
]
with suppress(Exception):
from transformers.models.m2m_100.modeling_m2m_100 import (
M2M100PreTrainedModel,
)
from parallelformers.policies.m2m_100 import (
M2M100DecoderPolicy,
M2M100EncoderPolicy,
)
self.builtin_policies[M2M100PreTrainedModel] = [
M2M100EncoderPolicy,
M2M100DecoderPolicy,
]
with suppress(Exception):
from transformers.models.marian.modeling_marian import (
MarianPreTrainedModel,
)
from parallelformers.policies.marian import (
MarianDecoderPolicy,
MarianEncoderPolicy,
)
self.builtin_policies[MarianPreTrainedModel] = [
MarianEncoderPolicy,
MarianDecoderPolicy,
]
with suppress(Exception):
from transformers.models.mobilebert.modeling_mobilebert import (
MobileBertPreTrainedModel,
)
from parallelformers.policies.mobilebert import MobileBertPolicy
self.builtin_policies[MobileBertPreTrainedModel] = [
MobileBertPolicy,
]
with suppress(Exception):
from transformers.models.mpnet.modeling_mpnet import (
MPNetPreTrainedModel,
)
from parallelformers.policies.mpnet import (
MPNetEncoderPolicy,
MPNetLayerPolicy,
)
self.builtin_policies[MPNetPreTrainedModel] = [
MPNetEncoderPolicy,
MPNetLayerPolicy,
]
with suppress(Exception):
from transformers.models.luke.modeling_luke import (
LukePreTrainedModel,
)
from parallelformers.policies.luke import LukePolicy
self.builtin_policies[LukePreTrainedModel] = [
LukePolicy,
]
with suppress(Exception):
from transformers.models.dpr.modeling_dpr import (
DPRPretrainedContextEncoder,
DPRPretrainedQuestionEncoder,
DPRPretrainedReader,
)
self.builtin_policies[DPRPretrainedReader] = [
BertPolicy,
]
self.builtin_policies[DPRPretrainedQuestionEncoder] = [
BertPolicy,
]
self.builtin_policies[DPRPretrainedContextEncoder] = [
BertPolicy,
]
with suppress(Exception):
from transformers.models.lxmert.modeling_lxmert import (
LxmertPreTrainedModel,
)
from parallelformers.policies.lxmert import LxmertPolicy
self.builtin_policies[LxmertPreTrainedModel] = [
LxmertPolicy,
]
with suppress(Exception):
from transformers.models.hubert.modeling_hubert import (
HubertPreTrainedModel,
)
from parallelformers.policies.hubert import HubertPolicy
self.builtin_policies[HubertPreTrainedModel] = [
HubertPolicy,
]
with suppress(Exception):
from transformers.models.wav2vec2.modeling_wav2vec2 import (
Wav2Vec2PreTrainedModel,
)
from parallelformers.policies.wav2vec import Wav2VecPolicy
self.builtin_policies[Wav2Vec2PreTrainedModel] = [
Wav2VecPolicy,
]
with suppress(Exception):
from transformers.models.xlnet.modeling_xlnet import (
XLNetPreTrainedModel,
)
from parallelformers.policies.xlnet import XLNetPolicy
self.builtin_policies[XLNetPreTrainedModel] = [
XLNetPolicy,
]
with suppress(Exception):
from transformers.models.retribert.modeling_retribert import (
RetriBertPreTrainedModel,
)
self.builtin_policies[RetriBertPreTrainedModel] = [
BertPolicy,
]
with suppress(Exception):
from transformers.models.clip.modeling_clip import (
CLIPPreTrainedModel,
)
from parallelformers.policies.clip import (
CLIPLayerPolicy,
CLIPTextPolicy,
CLIPVisionPolicy,
)
self.builtin_policies[CLIPPreTrainedModel] = [
CLIPLayerPolicy,
CLIPTextPolicy,
CLIPVisionPolicy,
]
with suppress(Exception):
from transformers.models.detr.modeling_detr import (
DetrPreTrainedModel,
)
from parallelformers.policies.detr import (
DetrDecoderPolicy,
DetrEncoderPolicy,
)
self.builtin_policies[DetrPreTrainedModel] = [
DetrEncoderPolicy,
DetrDecoderPolicy,
]
with suppress(Exception):
from transformers.models.reformer.modeling_reformer import (
ReformerPreTrainedModel,
)
from parallelformers.policies.reformer import ReformerPolicy
self.builtin_policies[ReformerPreTrainedModel] = [
ReformerPolicy,
]
with suppress(Exception):
from transformers.models.longformer.modeling_longformer import (
LongformerPreTrainedModel,
)
from parallelformers.policies.longformer import LongformerPolicy
self.builtin_policies[LongformerPreTrainedModel] = [
LongformerPolicy,
]
with suppress(Exception):
from transformers.models.roformer.modeling_roformer import (
RoFormerPreTrainedModel,
)
from parallelformers.policies.roformer import RoformerPolicy
self.builtin_policies[RoFormerPreTrainedModel] = [
RoformerPolicy,
]
with suppress(Exception):
from transformers.models.ibert.modeling_ibert import (
IBertPreTrainedModel,
)
from parallelformers.policies.ibert import IBertPolicy
self.builtin_policies[IBertPreTrainedModel] = [
IBertPolicy,
]
with suppress(Exception):
from transformers.models.tapas.modeling_tapas import (
TapasPreTrainedModel,
)
from parallelformers.policies.tapas import TapasPolicy
self.builtin_policies[TapasPreTrainedModel] = [
TapasPolicy,
]
with suppress(Exception):
from transformers.models.funnel.modeling_funnel import (
FunnelPreTrainedModel,
)
from parallelformers.policies.funnel import FunnelPolicy
self.builtin_policies[FunnelPreTrainedModel] = [
FunnelPolicy,
]
with suppress(Exception):
from transformers.models.layoutlm.modeling_layoutlm import (
LayoutLMPreTrainedModel,
)
from parallelformers.policies.layoutlm import LayoutLMPolicy
self.builtin_policies[LayoutLMPreTrainedModel] = [
LayoutLMPolicy,
]
with suppress(Exception):
from transformers.models.led.modeling_led import LEDPreTrainedModel
from parallelformers.policies.led import (
LEDDecoderPolicy,
LEDEncoderPolicy,
)
self.builtin_policies[LEDPreTrainedModel] = [
LEDEncoderPolicy,
LEDDecoderPolicy,
]
with suppress(Exception):
from transformers.models.prophetnet.modeling_prophetnet import (
ProphetNetPreTrainedModel,
)
from parallelformers.policies.prophetnet import (
ProphetNetDecoderPolicy,
ProphetNetEncoderPolicy,
)
self.builtin_policies[ProphetNetPreTrainedModel] = [
ProphetNetEncoderPolicy,
ProphetNetDecoderPolicy,
]
with suppress(Exception):
from transformers.models.visual_bert.modeling_visual_bert import (
VisualBertPreTrainedModel,
)
from parallelformers.policies.visual_bert import VisualBertPolicy
self.builtin_policies[VisualBertPreTrainedModel] = [
VisualBertPolicy,
]
with suppress(Exception):
from transformers.models.speech_to_text.modeling_speech_to_text import (
Speech2TextPreTrainedModel,
)
from parallelformers.policies.speech_to_text import (
Speech2TextDecoderPolicy,
Speech2TextEncoderPolicy,
)
self.builtin_policies[Speech2TextPreTrainedModel] = [
Speech2TextEncoderPolicy,
Speech2TextDecoderPolicy,
]
with suppress(Exception):
from transformers.models.gptj.modeling_gptj import (
GPTJPreTrainedModel,
)
from parallelformers.policies.gptj import GPTJPolicy
self.builtin_policies[GPTJPreTrainedModel] = [
GPTJPolicy,
]
with suppress(Exception):
from transformers.models.megatron_bert import (
MegatronBertPreTrainedModel,
)
from parallelformers.policies.megtron_bert import (
MegatronBertPolicy,
)
self.builtin_policies[MegatronBertPreTrainedModel] = [
MegatronBertPolicy,
]
def get_policy(self, model: nn.Module) -> Union[List[Policy], None]:
"""
Find appropriate policies for the current model
Args:
model (nn.Module): model to parallelize
Returns:
Union[List[Policy], None]: appropriate policies or none
"""
for k, v in self.available().items():
if isinstance(model, k):
return v
return None
def available(self):
"""Dictionary of available models and policies"""
return self.builtin_policies
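# Hedged usage sketch (added for illustration, not part of the original file): the policy
# lookup walks the registry built above and matches on the pre-trained base class of the
# loaded model. The checkpoint name below is an assumption used purely for illustration.
def _example_get_policy():
    from transformers import AutoModel

    model = AutoModel.from_pretrained("bert-base-uncased")  # hypothetical checkpoint choice
    # Returns e.g. [BertPolicy] if the bert policy import above succeeded, else None.
    return AutoPolicy().get_policy(model)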
| 30.836257
| 88
| 0.585815
| 1,399
| 21,092
| 8.727663
| 0.204432
| 0.05045
| 0.087142
| 0.10647
| 0.206716
| 0.206716
| 0.052744
| 0.010811
| 0.010811
| 0
| 0
| 0.006082
| 0.360753
| 21,092
| 683
| 89
| 30.881406
| 0.899503
| 0.039351
| 0
| 0.259109
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006073
| false
| 0
| 0.214575
| 0
| 0.228745
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ab472bf8d8e22693d678c504a9b881ed31f9478
| 3,042
|
py
|
Python
|
main/upper_air_humidity.py
|
RyosukeDTomita/gcmPlot
|
430f8af353daf464b5c5566f1c163d5bef63f584
|
[
"MIT"
] | null | null | null |
main/upper_air_humidity.py
|
RyosukeDTomita/gcmPlot
|
430f8af353daf464b5c5566f1c163d5bef63f584
|
[
"MIT"
] | null | null | null |
main/upper_air_humidity.py
|
RyosukeDTomita/gcmPlot
|
430f8af353daf464b5c5566f1c163d5bef63f584
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Name: upper_air_humidity.py
Make upper level weather chart.
Usage: python3 upper_air_humidity.py --file <ncfile>
Author: Ryosuke Tomita
Date: 2022/01/07
"""
import argparse
from ncmagics import fetchtime, japanmap, meteotool
def parse_args() -> dict:
"""parse_args.
set file path.
Args:
Returns:
dict:
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="set ncfile.", type=str)
p = parser.parse_args()
args = {"file": p.file}
return args
def output_name(ncfile: str, isobaric_surface: int) -> str:
"""output_name.
Args:
ncfile (str): ncfile
isobaric_surface (int): isobaric_surface
Returns:
str:
"""
date_time = fetchtime.fetch_time(ncfile)
outname = (date_time + "_" + str(isobaric_surface))
return outname
def main():
"""main.
"""
args = parse_args()
meteo_tool = meteotool.MeteoTools(args["file"])
lat, lon = meteo_tool.get_lat_lon()
isobaric_surface = (850, 500, 300)
#label_upper = (30, 0)
#lebel_min = (-30, -60)
for i, pressure in enumerate(isobaric_surface):
# get parameter
temp_c = meteo_tool.get_parameter('t', isobaric_surface=pressure) - 273.15
rh = meteo_tool.get_parameter('r', isobaric_surface=pressure)
height_gpm = meteo_tool.get_parameter('gh', isobaric_surface=pressure)
u_wind = meteo_tool.get_parameter('u', isobaric_surface=pressure)
v_wind = meteo_tool.get_parameter('v', isobaric_surface=pressure)
jp_map = japanmap.JpMap()
jp_map.contour_plot(lon, lat, height_gpm)
#jp_map.shade_plot(lon, lat, temp_c,
# label="2m temperature ($^\circ$C)",
# color_bar_label_max=label_upper[i],
# color_bar_label_min=lebel_min[i],
# color_map_type="temperature",
# double_color_bar=True,)
jp_map.shade_plot(lon, lat, rh,
label="relative humidity (%)",
color_bar_label_max=100,
color_bar_label_min=0,
color_map_type="gray",
double_color_bar=False,)
jp_map.vector_plot(lon, lat, u_wind, v_wind,
vector_interval=5, vector_scale=10, mode="wind")
#jp_map.gray_shade(lon, lat, rh,
# label="relative humidity (%)",
# color_bar_label_max=100,
# color_bar_label_min=0,
# )
if pressure == 850:
jp_map.color_line(lon, lat, temp_c, line_value=-6, color='#0000ff')
if pressure == 500:
jp_map.color_line(lon, lat, temp_c, line_value=-36, color='#b22222')
outname = output_name(args["file"], pressure)
print(outname)
jp_map.save_fig(outname, str(pressure) + "hPa")
if __name__ == "__main__":
main()
| 31.040816
| 82
| 0.575608
| 364
| 3,042
| 4.508242
| 0.340659
| 0.100548
| 0.043876
| 0.063985
| 0.171846
| 0.141377
| 0.120658
| 0.120658
| 0.120658
| 0.120658
| 0
| 0.028869
| 0.305391
| 3,042
| 97
| 83
| 31.360825
| 0.747752
| 0.283037
| 0
| 0
| 0
| 0
| 0.043851
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.047619
| 0
| 0.166667
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ab4e78536a96c9504186fa7b02c118e2936a403
| 1,406
|
py
|
Python
|
code_week19_831_96/biao_shi_shu_zi.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
code_week19_831_96/biao_shi_shu_zi.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
code_week19_831_96/biao_shi_shu_zi.py
|
dylanlee101/leetcode
|
b059afdadb83d504e62afd1227107de0b59557af
|
[
"Apache-2.0"
] | null | null | null |
'''
Implement a function that determines whether a string represents a numeric value (including integers and decimals). For example, the strings "+100", "5e2", "-123", "3.1416", "-1E-16" and "0123" all represent numeric values, while "12e", "1a3.14", "1.2.3", "+-5" and "12e+5.4" do not.
Source: LeetCode
Link: https://leetcode-cn.com/problems/biao-shi-shu-zhi-de-zi-fu-chuan-lcof
'''
class Solution:
def isNumber(self, s: str) -> bool:
states = [
{ ' ': 0, 's': 1, 'd': 2, '.': 4 }, # 0. start with 'blank'
{ 'd': 2, '.': 4 } , # 1. 'sign' before 'e'
{ 'd': 2, '.': 3, 'e': 5, ' ': 8 }, # 2. 'digit' before 'dot'
{ 'd': 3, 'e': 5, ' ': 8 }, # 3. 'digit' after 'dot'
{ 'd': 3 }, # 4. 'digit' after 'dot' (‘blank’ before 'dot')
{ 's': 6, 'd': 7 }, # 5. 'e'
{ 'd': 7 }, # 6. 'sign' after 'e'
{ 'd': 7, ' ': 8 }, # 7. 'digit' after 'e'
{ ' ': 8 } # 8. end with 'blank'
]
p = 0 # start with state 0
for c in s:
if '0' <= c <= '9': t = 'd' # digit
elif c in "+-": t = 's' # sign
elif c in "eE": t = 'e' # e or E
elif c in ". ": t = c # dot, blank
else: t = '?' # unknown
if t not in states[p]: return False
p = states[p][t]
return p in (2, 3, 7, 8)
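# Hedged usage sketch (added for illustration, not part of the original solution): the
# state machine above accepts well-formed numbers and rejects malformed ones.
if __name__ == '__main__':
    solution = Solution()
    print(solution.isNumber("+100"))     # True
    print(solution.isNumber("3.1416"))   # True
    print(solution.isNumber("1a3.14"))   # False
    print(solution.isNumber("12e+5.4"))  # False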
| 45.354839
| 129
| 0.345661
| 183
| 1,406
| 2.655738
| 0.437158
| 0.024691
| 0.04321
| 0.016461
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090793
| 0.443812
| 1,406
| 30
| 130
| 46.866667
| 0.530691
| 0.341394
| 0
| 0
| 0
| 0
| 0.035437
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ab611b64794b954266ea15a077d39ba3447ef27
| 13,211
|
py
|
Python
|
teeth_overlord/tests/unit/networks/neutron.py
|
rackerlabs/teeth-overlord
|
d76f6a03853d964b556aa1aa0f7011b4d1a6f208
|
[
"Apache-2.0"
] | null | null | null |
teeth_overlord/tests/unit/networks/neutron.py
|
rackerlabs/teeth-overlord
|
d76f6a03853d964b556aa1aa0f7011b4d1a6f208
|
[
"Apache-2.0"
] | null | null | null |
teeth_overlord/tests/unit/networks/neutron.py
|
rackerlabs/teeth-overlord
|
d76f6a03853d964b556aa1aa0f7011b4d1a6f208
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2013 Rackspace, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
from teeth_overlord import config
from teeth_overlord.networks import neutron
from teeth_overlord import tests
from keystoneclient.apiclient import exceptions as keystone_exceptions
from keystoneclient.v2_0 import client as keystone_client
from neutronclient.common import exceptions as neutron_exceptions
from neutronclient.neutron import client as neutron_client
NETWORK1_RESPONSE = {
u'status': u'ACTIVE',
u'subnets': [u'SUBNET1'],
u'name': u'private',
u'provider:physical_network': None,
u'admin_state_up': True,
u'tenant_id': u'TENANTID',
u'provider:network_type': u'local',
u'router:external': False,
u'shared': False,
u'id': u'NETWORK1',
u'provider:segmentation_id': None
}
NETWORK2_RESPONSE = {
u'status': u'ACTIVE',
u'subnets': [u'SUBNET2'],
u'name': u'public',
u'provider:physical_network': None,
u'admin_state_up': True,
u'tenant_id': u'TENANTID',
u'provider:network_type': u'local',
u'router:external': True,
u'shared': False,
u'id': u'NETWORK2',
u'provider:segmentation_id': None
}
PORT1_RESPONSE = {
u'status': u'ACTIVE',
u'binding:host_id': u'precise64',
u'name': u'',
u'allowed_address_pairs': [],
u'admin_state_up': True,
u'network_id': u'NETWORK1',
u'tenant_id': u'TENANTID',
u'extra_dhcp_opts': [],
u'binding:vif_type': u'ovs',
u'device_owner': u'network:dhcp',
u'binding:capabilities': {u'port_filter': True},
u'mac_address': u'fa:16:3e:e0:d4:63',
u'fixed_ips': [
{
u'subnet_id': u'SUBNET1',
u'ip_address': u'10.0.0.3'
}
],
u'id': u'PORT1',
u'security_groups': [],
u'device_id': u''
}
PORT2_RESPONSE = {
u'status': u'DOWN',
u'binding:host_id': u'',
u'name': u'',
u'allowed_address_pairs': [],
u'admin_state_up': True,
u'network_id': u'NETWORK2',
u'tenant_id': u'TENANTID',
u'extra_dhcp_opts': [],
u'binding:vif_type': u'unbound',
u'device_owner': u'',
u'binding:capabilities': {u'port_filter': False},
u'mac_address': u'00:09:7b:3e:18:ca',
u'fixed_ips': [
{
u'subnet_id': u'SUBNET2',
u'ip_address': u'192.168.27.3'
}
],
u'id': u'PORT2',
u'security_groups': [u'SECGRP'],
u'device_id': u''
}
SUBNET1_RESPONSE = {
u'name': u'private-subnet',
u'enable_dhcp': True,
u'network_id': u'NETWORK1',
u'tenant_id': u'TENANTID',
u'dns_nameservers': [],
u'allocation_pools': [
{
u'start': u'10.0.0.2',
u'end': u'10.0.0.254'
}
],
u'host_routes': [],
u'ip_version': 4,
u'gateway_ip': u'10.0.0.1',
u'cidr': u'10.0.0.0/24',
u'id': u'SUBNET1'
}
SUBNET2_RESPONSE = {
u'name': u'public-subnet',
u'enable_dhcp': False,
u'network_id': u'NETWORK2',
u'tenant_id': u'TENANTID',
u'dns_nameservers': [],
u'allocation_pools': [
{
u'start': u'192.168.27.1',
u'end': u'192.168.27.1'
},
{
u'start': u'192.168.27.3',
u'end': u'192.168.27.254'
}
],
u'host_routes': [],
u'ip_version': 4,
u'gateway_ip': u'192.168.27.2',
u'cidr': u'192.168.27.0/24',
u'id': u'SUBNET2'
}
SERIALIZED_NETWORK1 = collections.OrderedDict([
('id', u'NETWORK1'),
('name', u'private'),
('status', u'ACTIVE'),
('subnets', [
collections.OrderedDict([
('id', u'SUBNET1'),
('name', u'private-subnet'),
('ip_version', 4),
('gateway_ip', u'10.0.0.1'),
('cidr', u'10.0.0.0/24'),
('enable_dhcp', True)
])
])
])
SERIALIZED_NETWORK2 = collections.OrderedDict([
('id', u'NETWORK2'),
('name', u'public'),
('status', u'ACTIVE'),
('subnets', [
collections.OrderedDict([
('id', u'SUBNET2'),
('name', u'public-subnet'),
('ip_version', 4),
('gateway_ip', u'192.168.27.2'),
('cidr', u'192.168.27.0/24'),
('enable_dhcp', False)
])
])
])
SERIALIZED_PORT1 = collections.OrderedDict([
('id', u'PORT1'),
('name', u''),
('status', u'ACTIVE'),
('mac_address', u'fa:16:3e:e0:d4:63'),
('fixed_ips', [
{
u'subnet_id': u'SUBNET1',
u'ip_address': u'10.0.0.3'
}
]),
('network', SERIALIZED_NETWORK1)
])
class TestNeutronProvider(tests.TeethMockTestUtilities):
def setUp(self):
super(TestNeutronProvider, self).setUp()
self.config = config.LazyConfig(config={
'KEYSTONE_USER': 'user',
'KEYSTONE_PASS': 'pass',
'KEYSTONE_TENANT_ID': 'tenant',
'KEYSTONE_AUTH_URL': 'auth_url',
'NEUTRON_VERSION': '2.0',
'NEUTRON_URL': 'neutron_url',
'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f',
'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10',
'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10',
})
self.neutron_client_mock = self.add_mock(neutron_client, 'Client')
self.neutron_mock = self.neutron_client_mock.return_value
self.keystone_client_mock = self.add_mock(keystone_client, 'Client')
self.keystone_client_mock.return_value.auth_token = 'auth_token'
self.provider = neutron.NeutronProvider(self.config)
def test_get_auth_token(self):
t = self.provider._get_auth_token()
self.assertEqual(t, 'auth_token')
self.keystone_client_mock.assert_called_with(
username='user',
password='pass',
tenant_id='tenant',
auth_url='auth_url'
)
def test_get_auth_token_client_exception(self):
exc = keystone_exceptions.ClientException
self.keystone_client_mock.side_effect = exc
self.assertRaises(self.provider.NetworkProviderException,
self.provider._get_auth_token)
def test_get_neutron_client(self):
self.provider._get_neutron_client()
self.neutron_client_mock.assert_called_with(
'2.0',
endpoint_url='neutron_url',
token='auth_token'
)
def test_get_neutron_client_exception(self):
exc = neutron_exceptions.NeutronException()
self.neutron_client_mock.side_effect = exc
self.assertRaises(self.provider.NetworkProviderException,
self.provider._get_neutron_client)
def test_list_networks(self):
networks = {'networks': [NETWORK1_RESPONSE,
NETWORK2_RESPONSE]}
self.neutron_mock.list_networks.return_value = networks
self.neutron_mock.show_subnet.side_effect = [
{'subnet': SUBNET1_RESPONSE},
{'subnet': SUBNET2_RESPONSE}
]
networks = self.provider.list_networks()
results = [
SERIALIZED_NETWORK1,
SERIALIZED_NETWORK2
]
self.assertEqual([n.serialize() for n in networks], results)
def test_list_networks_empty(self):
self.neutron_mock.list_networks.return_value = {'networks': []}
networks = self.provider.list_networks()
self.neutron_mock.list_networks.assert_called()
self.assertEqual(networks, [])
def test_list_networks_client_exception(self):
exc = neutron_exceptions.NeutronException()
self.neutron_mock.list_networks.side_effect = exc
self.assertRaises(self.provider.NetworkProviderException,
self.provider.list_networks)
def test_get_network_info(self):
network = {'network': NETWORK1_RESPONSE}
self.neutron_mock.show_network.return_value = network
self.neutron_mock.show_subnet.side_effect = [
{'subnet': SUBNET1_RESPONSE}
]
network = self.provider.get_network_info('NETWORK1')
self.assertEqual(network.serialize(), SERIALIZED_NETWORK1)
self.neutron_mock.show_network.assert_called_with('NETWORK1')
def test_get_network_info_does_not_exist(self):
exc = neutron_exceptions.NeutronException()
exc.message = '404 Not Found'
self.neutron_mock.show_network.side_effect = exc
self.assertRaises(self.provider.NetworkDoesNotExist,
self.provider.get_network_info,
'NETWORK1')
def test_get_network_info_client_exception(self):
exc = neutron_exceptions.NeutronException()
self.neutron_mock.show_network.side_effect = exc
self.assertRaises(self.provider.NetworkProviderException,
self.provider.get_network_info,
'NETWORK1')
def test_list_ports(self):
ports = {'ports': [PORT1_RESPONSE]}
self.neutron_mock.list_ports.return_value = ports
network = {'network': NETWORK1_RESPONSE}
self.neutron_mock.show_network.return_value = network
subnet = {'subnet': SUBNET1_RESPONSE}
self.neutron_mock.show_subnet.return_value = subnet
ports = self.provider.list_ports('a:b:c:d')
self.assertEqual([p.serialize() for p in ports], [SERIALIZED_PORT1])
self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d')
def test_attach(self):
port = {'port': PORT1_RESPONSE}
self.neutron_mock.create_port.return_value = port
network = {'network': NETWORK1_RESPONSE}
self.neutron_mock.show_network.return_value = network
subnet = {'subnet': SUBNET1_RESPONSE}
self.neutron_mock.show_subnet.return_value = subnet
port = self.provider.attach('a:b:c:d', 'network_id')
self.neutron_mock.create_port.assert_called_with({
'port': {
'network_id': 'network_id',
'admin_state_up': True,
'mac_address': 'a:b:c:d'
}
})
self.assertEqual(port.serialize(), SERIALIZED_PORT1)
def test_attach_client_exception(self):
exc = neutron_exceptions.NeutronException()
self.neutron_mock.create_port.side_effect = exc
self.assertRaises(self.provider.NetworkProviderException,
self.provider.attach,
'mac_address', 'network_id')
def test_detatch(self):
ports = {'ports': [PORT1_RESPONSE]}
self.neutron_mock.list_ports.return_value = ports
network = {'network': NETWORK1_RESPONSE}
self.neutron_mock.show_network.return_value = network
subnet = {'subnet': SUBNET1_RESPONSE}
self.neutron_mock.show_subnet.return_value = subnet
self.provider.detach('a:b:c:d')
self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id'])
self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d')
def test_detach_specific_network(self):
ports = {'ports': [PORT1_RESPONSE]}
self.neutron_mock.list_ports.return_value = ports
network = {'network': NETWORK1_RESPONSE}
self.neutron_mock.show_network.return_value = network
subnet = {'subnet': SUBNET1_RESPONSE}
self.neutron_mock.show_subnet.return_value = subnet
self.provider.detach('a:b:c:d', 'network_id')
self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id'])
self.neutron_mock.list_ports.assert_called_with(
mac_address='a:b:c:d', network_id='network_id')
def test_detach_client_exception(self):
ports = {'ports': [PORT1_RESPONSE]}
self.neutron_mock.list_ports.return_value = ports
network = {'network': NETWORK1_RESPONSE}
self.neutron_mock.show_network.return_value = network
subnet = {'subnet': SUBNET1_RESPONSE}
self.neutron_mock.show_subnet.return_value = subnet
exc = neutron_exceptions.NeutronException()
self.neutron_mock.delete_port.side_effect = exc
self.assertRaises(self.provider.NetworkProviderException,
self.provider.detach,
'a:b:c:d')
def test_get_default_networks(self):
network_ids = self.provider.get_default_networks()
self.assertEqual(network_ids, [self.config.NEUTRON_PUBLIC_NETWORK,
self.config.NEUTRON_PRIVATE_NETWORK])
def test_get_service_network(self):
network_id = self.provider.get_service_network()
self.assertEqual(network_id, self.config.NEUTRON_SERVICE_NETWORK)
| 32.221951
| 78
| 0.626448
| 1,608
| 13,211
| 4.901741
| 0.149876
| 0.053032
| 0.064704
| 0.049607
| 0.596803
| 0.527531
| 0.476909
| 0.44202
| 0.402563
| 0.383659
| 0
| 0.030446
| 0.246688
| 13,211
| 409
| 79
| 32.300734
| 0.761555
| 0.041935
| 0
| 0.383436
| 0
| 0
| 0.191557
| 0.028303
| 0
| 0
| 0
| 0
| 0.076687
| 1
| 0.058282
| false
| 0.006135
| 0.02454
| 0
| 0.08589
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ab7ab472dc6bde156894c22490a3de97781b2d7
| 4,508
|
py
|
Python
|
typeidea/blog/views.py
|
Phoenix-sy/typeidea
|
e913218872c7f4e9afc290eb42b4ca8c8e4523be
|
[
"MIT"
] | null | null | null |
typeidea/blog/views.py
|
Phoenix-sy/typeidea
|
e913218872c7f4e9afc290eb42b4ca8c8e4523be
|
[
"MIT"
] | 4
|
2020-06-06T01:37:34.000Z
|
2021-09-08T01:49:56.000Z
|
typeidea/blog/views.py
|
Phoenix-sy/typeidea
|
e913218872c7f4e9afc290eb42b4ca8c8e4523be
|
[
"MIT"
] | null | null | null |
from datetime import date
from django.core.cache import cache
from django.db.models import Q, F
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
#from silk.profiling.profiler import silk_profile
from config.models import SideBar
from .models import Post, Tag, Category
from comment.models import Comment
class CommonViewMinxin:
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'sidebars': self.get_sidebars(),
})
context.update(self.get_navs())
return context
def get_sidebars(self):
return SideBar.objects.filter(status=SideBar.STATUS_SHOW)
def get_navs(self):
categories = Category.objects.filter(status=Category.STATUS_NORMAL)
nav_categories = []
normal_categories = []
for cate in categories:
if cate.is_nav:
nav_categories.append(cate)
else:
normal_categories.append(cate)
return {
'navs': nav_categories,
'categories': normal_categories,
}
class IndexView(CommonViewMinxin, ListView):
queryset = Post.latest_posts()
paginate_by = 5
context_object_name = 'post_list'
template_name = 'blog/list.html'
class CategoryView(IndexView):
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
category_id = self.kwargs.get('category_id')
category = get_object_or_404(Category, pk=category_id)
context.update({
'category': category,
})
return context
def get_queryset(self):
        '''Override the queryset to filter by category.'''
queryset = super().get_queryset()
category_id = self.kwargs.get('category_id')
return queryset.filter(category_id=category_id)
class TagView(IndexView):
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
tag_id = self.kwargs.get('tag_id')
tag = get_object_or_404(Tag, pk=tag_id)
context.update({
'tag': tag,
})
return context
def get_queryset(self):
        '''Override the queryset to filter by tag.'''
queryset = super().get_queryset()
tag_id = self.kwargs.get('tag_id')
return queryset.filter(tag__id=tag_id)
class PostDetailView(CommonViewMinxin, DetailView):
queryset = Post.latest_posts()
template_name = 'blog/detail.html'
context_object_name = 'post'
pk_url_kwarg = 'post_id'
def get(self, request, *args, **kwargs):
response = super().get(request, *args, **kwargs)
self.handle_visited()
return response
def handle_visited(self):
increase_pv = False
increase_uv = False
uid = self.request.uid
pv_key = 'pv:%s:%s' % (uid, self.request.path)
uv_key = 'uv:%s:%s:%s' % (uid, str(date.today()), self.request.path)
if not cache.get(pv_key):
increase_pv = True
            cache.set(pv_key, 1, 1*60)  # valid for 1 minute
if not cache.get(uv_key):
increase_uv = True
cache.set(uv_key, 1, 24*60*60)
if increase_pv and increase_uv:
Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1,
uv=F('uv') + 1)
elif increase_pv:
Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1)
        elif increase_uv:
            Post.objects.filter(pk=self.object.id).update(uv=F('uv') + 1)
class SearchView(IndexView):
def get_context_data(self):
context = super().get_context_data()
context.update({
'keyword': self.request.GET.get('keyword', '')
})
return context
def get_queryset(self):
queryset = super().get_queryset()
keyword = self.request.GET.get('keyword')
if not keyword:
return queryset
return queryset.filter(Q(title__icontains=keyword) | Q(desc__icontains
=keyword))
class AuthorView(IndexView):
def get_queryset(self):
queryset = super().get_queryset()
author_id = self.kwargs.get('owner_id')
return queryset.filter(owner_id=author_id)
'''
def post_list(request, category_id=None, tag_id=None):
tag = None
category = None
if tag_id:
post_list, tag = Post.get_by_tag(tag_id)
elif category_id:
post_list, category=Post.get_by_category(category_id)
else:
post_list = Post.latest_posts()
context = {
'category': category,
'tag': tag,
'post_list': post_list,
'sidebars': SideBar.get_all(),
}
context.update(Category.get_navs())
return render(request, 'blog/list.html', context=context)
def post_detail(request, post_id=None):
try:
post = Post.objects.get(id=post_id)
except Post.DoesNotExist:
raise Http404('Post does not exist!')
context={
'post': post,
'sidebars': SideBar.get_all(),
}
context.update(Category.get_navs())
return render(request, 'blog/detail.html', context=context)
'''
| 24.367568
| 72
| 0.717613
| 635
| 4,508
| 4.900787
| 0.190551
| 0.021208
| 0.03599
| 0.0241
| 0.290167
| 0.281812
| 0.248072
| 0.185733
| 0.15874
| 0.15874
| 0
| 0.007548
| 0.147737
| 4,508
| 184
| 73
| 24.5
| 0.802447
| 0.019743
| 0
| 0.309091
| 0
| 0
| 0.048056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109091
| false
| 0
| 0.081818
| 0.009091
| 0.436364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0aba3f90d5e6185589e45a9a8d8d372bccb752c2
| 764
|
py
|
Python
|
tests/test_processor.py
|
vijithv/djangosaml2idp
|
8a238063da55bf4823bdc2192168171767c4e056
|
[
"Apache-2.0"
] | 1
|
2021-11-03T17:53:29.000Z
|
2021-11-03T17:53:29.000Z
|
tests/test_processor.py
|
vijithv/djangosaml2idp
|
8a238063da55bf4823bdc2192168171767c4e056
|
[
"Apache-2.0"
] | null | null | null |
tests/test_processor.py
|
vijithv/djangosaml2idp
|
8a238063da55bf4823bdc2192168171767c4e056
|
[
"Apache-2.0"
] | 1
|
2020-04-23T03:52:10.000Z
|
2020-04-23T03:52:10.000Z
|
from django.contrib.auth import get_user_model
from djangosaml2idp.processors import BaseProcessor
User = get_user_model()
class TestBaseProcessor:
def test_extract_user_id_configure_by_user_class(self):
user = User()
user.USERNAME_FIELD = 'email'
user.email = 'test_email'
assert BaseProcessor('entity-id').get_user_id(user) == 'test_email'
def test_extract_user_id_configure_by_settings(self, settings):
"""Should use `settings.SAML_IDP_DJANGO_USERNAME_FIELD` to determine the user id field"""
settings.SAML_IDP_DJANGO_USERNAME_FIELD = 'first_name'
user = User()
user.first_name = 'test_first_name'
assert BaseProcessor('entity-id').get_user_id(user) == 'test_first_name'
| 29.384615
| 97
| 0.722513
| 100
| 764
| 5.14
| 0.35
| 0.058366
| 0.046693
| 0.070039
| 0.424125
| 0.424125
| 0.291829
| 0.171206
| 0.171206
| 0
| 0
| 0.00161
| 0.187173
| 764
| 25
| 98
| 30.56
| 0.826087
| 0.108639
| 0
| 0.142857
| 0
| 0
| 0.122963
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0abaca3d1ed91ca49de4c9b160592c473142f544
| 1,840
|
py
|
Python
|
com/ds/SingleLinkedList.py
|
sasikrishna/python-programs
|
937002f37c86efc5c876b37c7b42634ca629fffc
|
[
"MIT"
] | null | null | null |
com/ds/SingleLinkedList.py
|
sasikrishna/python-programs
|
937002f37c86efc5c876b37c7b42634ca629fffc
|
[
"MIT"
] | null | null | null |
com/ds/SingleLinkedList.py
|
sasikrishna/python-programs
|
937002f37c86efc5c876b37c7b42634ca629fffc
|
[
"MIT"
] | null | null | null |
class Node:
def __init__(self, data):
self.data = data
self.prev = None
self.next = None
class SingleLinkedList:
def __init__(self):
self.head = None
def add(self, ele):
new_node = Node(ele)
if self.head is None:
self.head = new_node
return
temp_head = self.head
while temp_head.next is not None:
temp_head = temp_head.next;
temp_head.next = new_node;
def contains(self, ele):
temp_head = self.head
while temp_head is not None:
if temp_head.data == ele:
return True
temp_head = temp_head.next
return False
def remove(self, ele):
if self.head is None:
return;
if self.head.data == ele:
self.head = self.head.next
return True
temp_head = self.head.next
prev_node = temp_head
is_node_deleted = False
while temp_head is not None:
if temp_head.data == ele:
is_node_deleted = True
prev_node.next = temp_head.next
break
prev_node = temp_head
temp_head = temp_head.next
return is_node_deleted
def print_list(self):
temp_head = self.head
while temp_head is not None:
print(temp_head.data)
temp_head = temp_head.next
if __name__ == '__main__':
list = SingleLinkedList();
list.add(5)
list.add(4)
list.add(12)
list.add(13)
list.add(19)
list.print_list();
print("List contains element 4", list.contains(4))
print("List contains element 6", list.contains(6))
print("Removing element 13", list.remove(13))
list.print_list();
print("List contains element 13", list.contains(13))
| 23.896104
| 56
| 0.563043
| 240
| 1,840
| 4.091667
| 0.158333
| 0.187373
| 0.08554
| 0.081466
| 0.37169
| 0.330957
| 0.239308
| 0.13442
| 0.13442
| 0.13442
| 0
| 0.016736
| 0.350543
| 1,840
| 76
| 57
| 24.210526
| 0.805021
| 0
| 0
| 0.322034
| 0
| 0
| 0.052746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101695
| false
| 0
| 0
| 0
| 0.220339
| 0.135593
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0abb4de3626dcbaf10f7a01c7d732b38a10d112a
| 3,453
|
py
|
Python
|
fs/error_tools.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
fs/error_tools.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
fs/error_tools.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
"""Tools for managing OS errors.
"""
from __future__ import print_function
from __future__ import unicode_literals
import errno
from contextlib import contextmanager
import sys
import platform
from . import errors
from six import reraise
_WINDOWS_PLATFORM = platform.system() == 'Windows'
class _ConvertOSErrors(object):
"""Context manager to convert OSErrors in to FS Errors.
"""
FILE_ERRORS = {
64: errors.RemoteConnectionError, # ENONET
errno.EACCES: errors.PermissionDenied,
errno.ENOENT: errors.ResourceNotFound,
errno.EFAULT: errors.ResourceNotFound,
errno.ESRCH: errors.ResourceNotFound,
errno.ENOTEMPTY: errors.DirectoryNotEmpty,
errno.EEXIST: errors.FileExists,
183: errors.DirectoryExists,
#errno.ENOTDIR: errors.DirectoryExpected,
errno.ENOTDIR: errors.ResourceNotFound,
errno.EISDIR: errors.FileExpected,
errno.EINVAL: errors.FileExpected,
errno.ENOSPC: errors.InsufficientStorage,
errno.EPERM: errors.PermissionDenied,
errno.ENETDOWN: errors.RemoteConnectionError,
errno.ECONNRESET: errors.RemoteConnectionError,
errno.ENAMETOOLONG: errors.PathError,
errno.EOPNOTSUPP: errors.Unsupported,
errno.ENOSYS: errors.Unsupported,
}
DIR_ERRORS = FILE_ERRORS.copy()
DIR_ERRORS[errno.ENOTDIR] = errors.DirectoryExpected
DIR_ERRORS[errno.EEXIST] = errors.DirectoryExists
DIR_ERRORS[errno.EINVAL] = errors.DirectoryExpected
if _WINDOWS_PLATFORM: # pragma: no cover
DIR_ERRORS[13] = errors.DirectoryExpected
DIR_ERRORS[267] = errors.DirectoryExpected
FILE_ERRORS[13] = errors.FileExpected
def __init__(self, opname, path, directory=False):
self._opname = opname
self._path = path
self._directory = directory
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
os_errors = (
self.DIR_ERRORS
if self._directory
else self.FILE_ERRORS
)
if exc_type and isinstance(exc_value, EnvironmentError):
_errno = exc_value.errno
fserror = os_errors.get(_errno, errors.OperationFailed)
if _errno == errno.EACCES and sys.platform == "win32":
if getattr(exc_value, 'args', None) == 32: # pragma: no cover
fserror = errors.ResourceLocked
reraise(
fserror,
fserror(
self._path,
exc=exc_value
),
traceback
)
# Stops linter complaining about invalid class name
convert_os_errors = _ConvertOSErrors
@contextmanager
def unwrap_errors(path_replace):
"""Get a context to map OS errors to their `fs.errors` counterpart.
The context will re-write the paths in resource exceptions to be
in the same context as the wrapped filesystem.
The only parameter may be the path from the parent, if only one path
is to be unwrapped. Or it may be a dictionary that maps wrapped
paths on to unwrapped paths.
"""
try:
yield
except errors.ResourceError as e:
if hasattr(e, 'path'):
if isinstance(path_replace, dict):
e.path = path_replace.get(e.path, e.path)
else:
e.path = path_replace
reraise(type(e), e)
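# Hedged usage sketch (added for illustration, not part of the original module): wrapping a
# raised OSError in `convert_os_errors` re-raises it as the matching `fs.errors` exception.
# The operation name and path below are assumptions used purely for illustration.
def _example_convert_os_errors():
    with convert_os_errors("getinfo", "/hypothetical/path"):
        # Raises errors.ResourceNotFound for "/hypothetical/path" via the mapping above.
        raise OSError(errno.ENOENT, "resource not found")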
| 31.390909
| 78
| 0.650449
| 375
| 3,453
| 5.821333
| 0.376
| 0.028859
| 0.049473
| 0.032066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006385
| 0.274254
| 3,453
| 109
| 79
| 31.678899
| 0.864725
| 0.162178
| 0
| 0
| 0
| 0
| 0.007027
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.105263
| 0.013158
| 0.210526
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0abcb370c0d40bd870443ed0b022026c144555c8
| 3,829
|
py
|
Python
|
python/index.py
|
stijnvanhulle/EscapeGame
|
ae3e35334d64394a0f696149bfd56c1fd7a97681
|
[
"MIT"
] | 1
|
2020-08-16T02:52:06.000Z
|
2020-08-16T02:52:06.000Z
|
python/index.py
|
stijnvanhulle/EscapeGame
|
ae3e35334d64394a0f696149bfd56c1fd7a97681
|
[
"MIT"
] | 1
|
2021-10-18T18:39:08.000Z
|
2021-10-18T18:39:08.000Z
|
python/index.py
|
stijnvanhulle/EscapeGame
|
ae3e35334d64394a0f696149bfd56c1fd7a97681
|
[
"MIT"
] | null | null | null |
# @Author: Stijn Van Hulle <stijnvanhulle>
# @Date: 2016-11-28T13:51:38+01:00
# @Email: [email protected]
# @Last modified by: stijnvanhulle
# @Last modified time: 2016-12-20T12:51:07+01:00
# @License: stijnvanhulle.be
#!/usr/bin/env python
import time
import datetime
import math
import sys
import json
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
import lib.faceDetection as faceDetection
import lib.levelCalculation as levelCalculation
MQTT_BROKER="localhost"
client = mqtt.Client()
#classes
def on_connect(client, userdata, flags, rc):
    # paho-mqtt 1.x passes (client, userdata, flags, rc) to on_connect
    print("Connected to MQTT-broker on " + MQTT_BROKER)
client.subscribe("online")
client.subscribe("message")
client.subscribe("detection_find")
client.subscribe("detection_found")
client.subscribe("recalculate_start")
client.subscribe("recalculate_done")
def on_message(client, userdata, msg):
try:
parsed_json=json.loads(convertJson(msg.payload))
if msg.topic=="detection_find":
print(parsed_json)
_image1 =parsed_json['image1']
_image2 =parsed_json['image2']
_read=parsed_json['read']
if _read:
if _image1 is not None and _image2 is not None:
percent=faceDetection.getDifference(_image1,_image2)
print('Detection:' + str(percent))
client.publish("detection_found", makeJsonObject_detection(percent,_image1,_image2,_read))
if msg.topic=="recalculate_start":
print(parsed_json)
_data =parsed_json['data']
_file=parsed_json['file']
if _data is not None:
calcObj=levelCalculation.calculate(_data,_file)
print('CalculatedOBJ:' + str(calcObj))
client.publish("recalculate_done", makeJsonObject_levelCalculate(calcObj['data'],calcObj['score']))
except Exception as error:
print('Error:',error)
def convertJson(data):
data=data.decode()
if data.startswith("'") and data.endswith("'"):
data = data[1:-1]
print(data)
return data
def makeJsonOnlineObject(device=''):
item=json.dumps({"device":device})
return str(item)
def init():
client.on_connect = on_connect
client.on_message = on_message
client.connect_async(MQTT_BROKER, 1883, 60)
client.loop_start()
time.sleep(0.2)
client.publish("online", makeJsonOnlineObject('FaceDetection'))
def makeJsonObject(value=None,port=None,type=None,read=False):
item=json.dumps({"port":port, "type":type,"value":value,"read":read})
return str(item)
def makeJsonObject_detection(value=None,image1=None,image2=None,read=False):
item=json.dumps({"value":value, "image1":image1,"image2":image2, "read":read})
return str(item)
def makeJsonObject_levelCalculate(data=None,score=0):
item=json.dumps({"data":data,"score":score})
return str(item)
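# Editorial sketch of how a peer could trigger the face-detection handler
# above; the field names mirror the on_message parser, the image names are
# made up, and the broker host is an assumption.
def example_publish_detection(broker="localhost"):
    payload = json.dumps({"image1": "a.jpg", "image2": "b.jpg", "read": True})
    publish.single("detection_find", payload, hostname=broker)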
def main():
init()
while True:
time.sleep(0.1)
data = input("Code:")
if data is not None:
try:
if data == 'exit':
    # exit() made the old sys.exit(0) unreachable; one call is enough
    sys.exit(0)
else:
# `msg` is undefined here; parse the console input instead
parsed_json = json.loads(data)
_type =parsed_json['type']
_port=parsed_json['port']
_read=parsed_json['read']
if _type is not None and _port is not None and _read is not None:
item=str(json.dumps(parsed_json))
print(item)
#client.publish("message",item)
client.publish("detection",item)
else:
raise ValueError('Not correct data')  # throw() is not Python
except Exception as error:
print('Error:',error)
if __name__ == '__main__':
try:
if len(sys.argv)>1:
MQTT_BROKER=sys.argv[1]
else:
input_text = input("Ip of MQTT-broker: ")
if input_text:
MQTT_BROKER=input_text
#executor = ProcessPoolExecutor(2)
#loop = trollius.get_event_loop()
#_main = trollius.async(loop.run_in_executor(executor, main))
main()
except (TypeError) as ex:
error="Error: " + str(ex)
#print(error)
except KeyboardInterrupt:
    # print before exiting; calling exit() first made the message unreachable
    print("\nIOT has shut down\n")
    sys.exit(0)
except SystemExit:
    print("\nIOT was forcibly shut down\n")
| 25.357616
| 105
| 0.71298
| 517
| 3,829
| 5.133462
| 0.274662
| 0.048983
| 0.023738
| 0.024115
| 0.13263
| 0.106255
| 0.086662
| 0
| 0
| 0
| 0
| 0.021413
| 0.146252
| 3,829
| 150
| 106
| 25.526667
| 0.790456
| 0.106294
| 0
| 0.224299
| 0
| 0
| 0.132042
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084112
| false
| 0
| 0.084112
| 0
| 0.214953
| 0.102804
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0abd370b6b3c7d06f851a685777b6e689527ccf7
| 8,184
|
py
|
Python
|
peps/converters.py
|
idjaw/pythondotorg
|
8e4babbc7ad15ed52b4f66fdd4ab43c2dd3bd649
|
[
"Apache-2.0"
] | null | null | null |
peps/converters.py
|
idjaw/pythondotorg
|
8e4babbc7ad15ed52b4f66fdd4ab43c2dd3bd649
|
[
"Apache-2.0"
] | 2
|
2022-01-13T03:57:42.000Z
|
2022-03-12T01:01:40.000Z
|
peps/converters.py
|
idjaw/pythondotorg
|
8e4babbc7ad15ed52b4f66fdd4ab43c2dd3bd649
|
[
"Apache-2.0"
] | null | null | null |
import re
import os
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files import File
from pages.models import Page, Image
PEP_TEMPLATE = 'pages/pep-page.html'
pep_url = lambda num: 'dev/peps/pep-{}/'.format(num)
def check_paths():
""" Checks to ensure our PEP_REPO_PATH is setup correctly """
if not hasattr(settings, 'PEP_REPO_PATH'):
raise ImproperlyConfigured("No PEP_REPO_PATH in settings")
if not os.path.exists(settings.PEP_REPO_PATH):
raise ImproperlyConfigured("PEP_REPO_PATH in settings does not exist")
def convert_pep0():
"""
Take existing generated pep-0000.html and convert to something suitable
for a Python.org Page returns the core body HTML necessary only
"""
check_paths()
pep0_path = os.path.join(settings.PEP_REPO_PATH, 'pep-0000.html')
pep0_content = open(pep0_path).read()
soup = BeautifulSoup(pep0_content)
body_children = list(soup.body.children)
# Grab header and PEP body
header = body_children[3]
pep_content = body_children[7]
# Fix PEP links
body_links = pep_content.find_all("a")
pep_href_re = re.compile(r'pep-(\d+)\.html')
for b in body_links:
m = pep_href_re.search(b.attrs['href'])
# Skip anything not matching 'pep-XXXX.html'
if not m:
continue
b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1))
# Remove Version from header
header_rows = header.find_all('th')
for t in header_rows:
if 'Version:' in t.text and 'N/A' in t.next_sibling.text:
t.parent.extract()
return ''.join([header.prettify(), pep_content.prettify()])
def get_pep0_page(commit=True):
"""
Using convert_pep0 above, create a CMS ready pep0 page and return it
pep0 is used as the directory index, but it's also an actual pep, so we
return both Page objects.
"""
pep0_content = convert_pep0()
pep0_page, _ = Page.objects.get_or_create(path='dev/peps/')
pep0000_page, _ = Page.objects.get_or_create(path='dev/peps/pep-0000/')
for page in [pep0_page, pep0000_page]:
page.content = pep0_content
page.content_markup_type = 'html'
page.title = "PEP 0 -- Index of Python Enhancement Proposals (PEPs)"
page.template_name = PEP_TEMPLATE
if commit:
page.save()
return pep0_page, pep0000_page
def fix_headers(soup, data):
""" Remove empty or unwanted headers and find our title """
header_rows = soup.find_all('th')
for t in header_rows:
if 'Version:' in t.text:
if t.next_sibling.text == '$Revision$':
t.parent.extract()
if t.next_sibling.text == '':
t.parent.extract()
if 'Last-Modified:' in t.text:
if '$Date$' in t.next_sibling.text:
t.parent.extract()
if t.next_sibling.text == '':
t.parent.extract()
if t.text == 'Title:':
data['title'] = t.next_sibling.text
if t.text == 'Content-Type:':
t.parent.extract()
if 'Version:' in t.text and 'N/A' in t.next_sibling.text:
t.parent.extract()
return soup, data
def convert_pep_page(pep_number, content):
"""
Handle different formats that pep2html.py outputs
"""
check_paths()
data = {
'title': None,
}
if '<html>' in content:
soup = BeautifulSoup(content)
data['title'] = soup.title.text
if not re.search(r'PEP \d+', data['title']):
data['title'] = 'PEP {} -- {}'.format(
pep_number,
soup.title.text,
)
header = soup.body.find('div', class_="header")
header, data = fix_headers(header, data)
data['header'] = header.prettify()
main_content = soup.body.find('div', class_="content")
data['main_content'] = main_content.prettify()
data['content'] = ''.join([
data['header'],
data['main_content']
])
else:
soup = BeautifulSoup(content)
soup, data = fix_headers(soup, data)
if not data['title']:
data['title'] = "PEP {} -- ".format(pep_number)
else:
if not re.search(r'PEP \d+', data['title']):
data['title'] = "PEP {} -- {}".format(
pep_number,
data['title'],
)
data['content'] = soup.prettify()
# Fix PEP links
pep_content = BeautifulSoup(data['content'])
body_links = pep_content.find_all("a")
pep_href_re = re.compile(r'pep-(\d+)\.html')
for b in body_links:
m = pep_href_re.search(b.attrs['href'])
# Skip anything not matching 'pep-XXXX.html'
if not m:
continue
b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1))
data['content'] = pep_content.prettify()
hg_link = "https://hg.python.org/peps/file/tip/pep-{0}.txt".format(pep_number)
data['content'] += """Source: <a href="{0}">{0}</a>""".format(hg_link)
return data
def get_pep_page(pep_number, commit=True):
"""
Given a pep_number retrieve original PEP source text, rst, or html.
Get or create the associated Page and return it
"""
pep_path = os.path.join(settings.PEP_REPO_PATH, 'pep-{}.html'.format(pep_number))
if not os.path.exists(pep_path):
    print("PEP Path '{}' does not exist, skipping".format(pep_path))
    return
pep_content = convert_pep_page(pep_number, open(pep_path).read())
pep_page, _ = Page.objects.get_or_create(path=pep_url(pep_number))
# Remove leading zeros from PEP number for display purposes
pep_number_string = str(pep_number)
pep_number_string = re.sub(r'^0+', '', pep_number_string)
pep_page.title = pep_content['title']
pep_page.content = pep_content['content']
pep_page.content_markup_type = 'html'
pep_page.template_name = PEP_TEMPLATE
if commit:
pep_page.save()
return pep_page
def add_pep_image(pep_number, path):
image_path = os.path.join(settings.PEP_REPO_PATH, path)
if not os.path.exists(image_path):
    print("Image Path '{}' does not exist, skipping".format(image_path))
    return
try:
page = Page.objects.get(path=pep_url(pep_number))
except Page.DoesNotExist:
print("Could not find backing PEP {}".format(pep_number))
return
# Find existing images, we have to loop here as we can't use the ORM
# to query against image__path
existing_images = Image.objects.filter(page=page)
MISSING = False
FOUND = False
for image in existing_images:
image_root_path = os.path.join(settings.MEDIA_ROOT, page.path, path)
if image.image.path.endswith(path):
FOUND = True
# File is missing on disk, recreate
if not os.path.exists(image_root_path):
MISSING = image
break
if not FOUND or MISSING:
image = None
if MISSING:
image = MISSING
else:
image = Image(page=page)
with open(image_path, 'rb') as image_obj:
image.image.save(path, File(image_obj))
image.save()
# Old images used to live alongside html, but now they're in different
# places, so update the page accordingly.
soup = BeautifulSoup(page.content.raw)
for img_tag in soup.findAll('img'):
if img_tag['src'] == path:
img_tag['src'] = os.path.join(settings.MEDIA_URL, page.path, path)
page.content.raw = soup.prettify()
page.save()
return image
def get_peps_rss():
rss_feed = os.path.join(settings.PEP_REPO_PATH, 'peps.rss')
if not os.path.exists(rss_feed):
return
page, _ = Page.objects.get_or_create(
path="dev/peps/peps.rss",
template_name="pages/raw.html",
)
with open(rss_feed, "r") as rss_content:
content = rss_content.read()
page.content = content
page.is_published = True
page.content_type = "application/rss+xml"
page.save()
return page
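# Editorial usage sketch (assumes settings.PEP_REPO_PATH points at a checkout
# of the peps repository containing pep2html.py output):
#
#     pep0_index, pep0000 = get_pep0_page()
#     page = get_pep_page('0008')
#     add_pep_image('3147', 'pep-3147-1.png')  # image name is illustrative
#     get_peps_rss()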
| 29.228571
| 85
| 0.615225
| 1,116
| 8,184
| 4.350358
| 0.206093
| 0.033368
| 0.020391
| 0.023069
| 0.342533
| 0.274356
| 0.234809
| 0.208239
| 0.193615
| 0.15242
| 0
| 0.00826
| 0.260386
| 8,184
| 279
| 86
| 29.333333
| 0.793821
| 0.127199
| 0
| 0.254237
| 0
| 0
| 0.121397
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045198
| false
| 0
| 0.039548
| 0
| 0.135593
| 0.016949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0abe087af168de7f10f0e7fc51d33adc2b129507
| 2,421
|
py
|
Python
|
implementations/python3/tests/CAPDU.py
|
sebastien-riou/SATL
|
b95d0e784d2e8e1384381d4d5b8b448d3d1798cf
|
[
"Apache-2.0"
] | 4
|
2020-05-13T10:13:55.000Z
|
2021-10-20T04:43:07.000Z
|
implementations/python3/tests/CAPDU.py
|
TiempoSecure/SATL
|
b95d0e784d2e8e1384381d4d5b8b448d3d1798cf
|
[
"Apache-2.0"
] | 4
|
2020-07-22T16:06:31.000Z
|
2021-07-25T19:51:41.000Z
|
implementations/python3/tests/CAPDU.py
|
TiempoSecure/SATL
|
b95d0e784d2e8e1384381d4d5b8b448d3d1798cf
|
[
"Apache-2.0"
] | 2
|
2019-05-12T21:15:00.000Z
|
2020-09-23T09:05:24.000Z
|
import os
import pysatl
from pysatl import CAPDU
if __name__ == "__main__":
def check(hexstr, expected):
capdu = CAPDU.from_hexstr(hexstr)
if capdu != expected:
raise Exception("Mismatch for input '"+hexstr+"'\nActual: "+str(capdu)+"\nExpected: "+str(expected))
def gencase(* ,LC ,LE):
assert(LC < 0x10000)
assert(LE <= 0x10000)
data = os.urandom(LC)  # os.getrandom() is Linux-only; os.urandom() is portable
hexstr = "00112233"
case4 = LC>0 and LE>0
case4e = case4 and (LC>0xFF or LE>0x100)
if LC>0:
if LC>0xFF or case4e:
hexstr += "00%04X"%LC
else:
hexstr += "%02X" % LC
hexstr += pysatl.Utils.hexstr(data, separator="")
if LE>0:
if case4e:
if LE == 0x10000:
hexstr += "0000"
else:
hexstr += "%04X"%LE
elif LE == 0x10000:
hexstr += "000000"
elif LE>0x100:
hexstr += "00%04X"%LE
elif LE == 0x100:
hexstr += "00"
else:
hexstr += "%02X" % LE
expected = hexstr
capdu = CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=data, LE=LE)
hexstr = capdu.to_hexstr()
if hexstr != expected:
raise Exception("Mismatch for LC=%d, LE=%d"%(LC,LE)+"\nActual: "+hexstr+"\nExpected: "+expected)
b = capdu.to_bytes()
assert(type(b) is bytes)
return (hexstr, capdu)
#check __repr__
expected = "pysatl.CAPDU.from_hexstr('00112233015502')"
capdu=None
exec("capdu="+expected)
assert(expected==repr(capdu))
#check well formed inputs
check("00112233", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("00 11 22 33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("0x00,0x11,0x22,0x33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
#check we tolerate less well formed inputs
check("00-11,22_33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("""0x00 0x11 0x22
0x33""", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("1 2 304", CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04))
LC_cases = [0,1,2,254,255,256,257,65534,65535]
LE_cases = LC_cases + [65536]
for LC in LC_cases:
for LE in LE_cases:
print(LC,LE)
check(*gencase(LC=LC, LE=LE))
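    # Editorial round-trip sketch reusing the repr test vector from above:
    # parse a case-4 short APDU, re-serialize it, and compare.
    capdu = CAPDU.from_hexstr('00112233015502')
    assert capdu.to_hexstr() == '00112233015502'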
| 32.28
| 114
| 0.53449
| 314
| 2,421
| 4.050955
| 0.283439
| 0.044025
| 0.056604
| 0.070755
| 0.289308
| 0.207547
| 0.207547
| 0.207547
| 0.207547
| 0.183176
| 0
| 0.168498
| 0.32342
| 2,421
| 74
| 115
| 32.716216
| 0.608059
| 0.032631
| 0
| 0.083333
| 0
| 0
| 0.120564
| 0.017956
| 0
| 0
| 0.083369
| 0
| 0.066667
| 1
| 0.033333
| false
| 0
| 0.05
| 0
| 0.1
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0abf69ab54ec15326e13cf19d070cb3b005d83d2
| 495
|
py
|
Python
|
mgmt/src/constants.py
|
pcaruana/sombrio
|
3b669fc83e0227a69b673b5555d88e15b55c397c
|
[
"MIT"
] | null | null | null |
mgmt/src/constants.py
|
pcaruana/sombrio
|
3b669fc83e0227a69b673b5555d88e15b55c397c
|
[
"MIT"
] | null | null | null |
mgmt/src/constants.py
|
pcaruana/sombrio
|
3b669fc83e0227a69b673b5555d88e15b55c397c
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
"""
constants.py - Contains all constants used by the device manager
Author:
- Pablo Caruana (pablo dot caruana at gmail dot com)
Date: 12/3/2016
"""
number_of_rows = 3 # total number rows of Index Servers
number_of_links = 5 # number of links to be sent to Crawler
number_of_chunks = 5 # number of chunks to be sent to Index Builder
number_of_comps = 10 # number of components managed by each watchdog
| 38.076923
| 79
| 0.656566
| 74
| 495
| 4.283784
| 0.594595
| 0.176656
| 0.082019
| 0.063091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036827
| 0.286869
| 495
| 12
| 80
| 41.25
| 0.86119
| 0.670707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0abfe16c350b956230d3407edf8eac65ac07365b
| 1,015
|
py
|
Python
|
XDoG/XDoG.py
|
STomoya/sketchify
|
93c068042f02172505457cc15cb0bef673666be3
|
[
"MIT"
] | null | null | null |
XDoG/XDoG.py
|
STomoya/sketchify
|
93c068042f02172505457cc15cb0bef673666be3
|
[
"MIT"
] | null | null | null |
XDoG/XDoG.py
|
STomoya/sketchify
|
93c068042f02172505457cc15cb0bef673666be3
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
def DoG(image, size, sigma, k=1.6, gamma=1.):
g1 = cv2.GaussianBlur(image, (size, size), sigma)
g2 = cv2.GaussianBlur(image, (size, size), sigma*k)
return g1 - gamma * g2
def XDoG(image, size, sigma, eps, phi, k=1.6, gamma=1.):
eps /= 255
d = DoG(image, size, sigma, k, gamma)
d /= d.max()
e = 1 + np.tanh(phi * (d - eps))
e[e >= 1] = 1
return e * 255
# This config was found by the author;
# modify it if the output is not as desired.
XDoG_config = dict(
size=0,
sigma=0.6,
eps=-15,
phi=10e8,
k=2.5,
gamma=0.97
)
def gen_xdog_image(src, dst):
gray = cv2.imread(src, cv2.IMREAD_GRAYSCALE)
# The author wanted gamma in [0.97, 0.98], but the best value depends on
# the image, so it is jittered randomly. Remove this if not needed.
# Jitter a copy so repeated calls don't accumulate drift in the
# module-level config.
config = dict(XDoG_config)
config['gamma'] += 0.01 * float(np.random.rand())
dogged = XDoG(gray, **config)
cv2.imwrite(dst, dogged)
if __name__ == "__main__":
gen_xdog_image('sample.jpg', 'dog.jpg')
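# Editorial sketch: the same pipeline over a small gamma sweep
# (output file names are made up).
#
#     for g in (0.95, 0.97, 0.99):
#         XDoG_config['gamma'] = g
#         gen_xdog_image('sample.jpg', 'dog_gamma_%.2f.jpg' % g)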
| 26.025641
| 60
| 0.613793
| 173
| 1,015
| 3.508671
| 0.433526
| 0.074135
| 0.069193
| 0.056013
| 0.196046
| 0.108731
| 0
| 0
| 0
| 0
| 0
| 0.062257
| 0.240394
| 1,015
| 39
| 61
| 26.025641
| 0.725032
| 0.193103
| 0
| 0
| 0
| 0
| 0.0369
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.071429
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ac1668c9f200fa1e8cd7c054395a35fadf64190
| 8,070
|
py
|
Python
|
lm/validate.py
|
ericlin8545/grover
|
3ac6e506f2e1a859d98cc2c3fb57ba251be31484
|
[
"Apache-2.0"
] | 864
|
2019-06-18T18:53:58.000Z
|
2022-03-04T22:36:52.000Z
|
lm/validate.py
|
ericlin8545/grover
|
3ac6e506f2e1a859d98cc2c3fb57ba251be31484
|
[
"Apache-2.0"
] | 62
|
2019-06-20T19:37:39.000Z
|
2022-02-10T00:14:49.000Z
|
lm/validate.py
|
ericlin8545/grover
|
3ac6e506f2e1a859d98cc2c3fb57ba251be31484
|
[
"Apache-2.0"
] | 224
|
2019-06-18T18:45:56.000Z
|
2022-03-29T17:46:30.000Z
|
# Original work Copyright 2018 The Google AI Language Team Authors.
# Modified work Copyright 2019 Rowan Zellers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from lm.modeling import model_fn_builder, GroverConfig
import tensorflow as tf
from lm.dataloader import input_fn_builder
import numpy as np
import tempfile
import h5py
from google.cloud import storage
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"config_file", 'configs/base.json',
"The config json file corresponding to the pre-trained news model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"input_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string(
"validation_name", 'preds.h5',
"Name to use")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained model).")
flags.DEFINE_integer(
"max_seq_length", 1024,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("batch_size", 32, "Batch size used for eval")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
# This is a handy little utility so that we can save the perplexities to TPU
class gcloudwriter():
def __init__(self, gcloud_name):
assert gcloud_name.startswith('gs://')
self.gcloud_name = gcloud_name
bucket_name, blob_name = gcloud_name.split('gs://')[1].split('/', 1)
bucket = storage.Client().get_bucket(bucket_name)
self.blob = bucket.blob(blob_name)
def __enter__(self):
self.tempfile = tempfile.NamedTemporaryFile()
return self.tempfile
def __exit__(self, *args):
self.tempfile.flush()
print("UPLOADING TO {}".format(self.gcloud_name), flush=True)
self.blob.upload_from_filename(self.tempfile.name)
self.tempfile.close()
def ind_where(array: np.ndarray, target, return_first_match=True, default_value=-1):
"""
:param array: Single dimension array
:param target: target to search for
:param return_first_match: If true, return the first index that matches, otherwise, return the last one
:param default_value: Index to return if there was no match
:return: index of the first match, or -1 if nothing
"""
assert array.ndim == 1
matching_inds = np.where(array == target)[0]
if len(matching_inds) > 0:
if return_first_match:
return int(matching_inds[0])
else:
return int(matching_inds[-1])
return default_value
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
news_config = GroverConfig.from_json_file(FLAGS.config_file)
tf.gfile.MakeDirs(FLAGS.output_dir)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
tf.logging.info(" %s" % input_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.iterations_per_loop,
keep_checkpoint_max=None,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(news_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=1e-4,
num_train_steps=0,
num_warmup_steps=0,
use_tpu=FLAGS.use_tpu,
)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.batch_size,
eval_batch_size=FLAGS.batch_size,
predict_batch_size=FLAGS.batch_size,
params={'model_dir': FLAGS.output_dir}
)
eval_input_fn = input_fn_builder(
input_files=input_files,
seq_length=FLAGS.max_seq_length,
evaluate_for_fixed_number_of_steps=False,
num_cpu_threads=1,
is_training=False)
result = [x for x in estimator.predict(input_fn=eval_input_fn, yield_single_examples=True)]
cats = sorted(result[0].keys())
result_stack = {cat: np.stack([x[cat] for x in result]) for cat in cats}
with gcloudwriter(os.path.join(FLAGS.output_dir, FLAGS.validation_name)) as tempfile_name:
with h5py.File(tempfile_name, 'w') as h5:
for cat, data in result_stack.items():
dtype2use = np.float16 if cat.endswith(('logprobs', 'top_p_required')) else np.uint16
h5.create_dataset(cat, data=data.astype(dtype2use))
h5.create_dataset('model', data=FLAGS.config_file)
h5.create_dataset('ckpt', data=FLAGS.init_checkpoint)
h5.create_dataset('input_file', data=FLAGS.input_file)
# This gives the perplexity of the entire article. If you want to replicate
# the results of the paper, you might need to do something different to
# extract the ppl of just the body in particular.
ppl_ex = []
for logprobs_i, ids_i in zip(result_stack['gt_logprobs'], result_stack['labels']):
# Omit the first token. Keep in mind input_ids is shifted by 1
start_ind = ind_where(ids_i, target=50265, default_value=0)
end_ind = ind_where(ids_i, target=50266, default_value=ids_i.shape[0] - 1)
ppl_ex.append(logprobs_i[start_ind:end_ind])
ppl_ex = np.concatenate(ppl_ex, 0)
print("Article perplexity is {:.3f}".format(np.exp(-np.mean(ppl_ex))), flush=True)
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
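# Editorial usage sketch (bucket and file names are hypothetical; the flags
# are the ones defined above):
#
#     python -m lm.validate --input_file=gs://bucket/val-*.tfrecord \
#         --output_dir=gs://bucket/eval --config_file=configs/base.json \
#         --batch_size=32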
| 37.534884
| 108
| 0.688352
| 1,138
| 8,070
| 4.675747
| 0.318981
| 0.028942
| 0.028754
| 0.010712
| 0.077993
| 0.041721
| 0.033828
| 0.033828
| 0.033828
| 0.033828
| 0
| 0.011569
| 0.218092
| 8,070
| 214
| 109
| 37.71028
| 0.831696
| 0.169021
| 0
| 0.108844
| 0
| 0
| 0.220418
| 0.00436
| 0
| 0
| 0
| 0
| 0.013605
| 1
| 0.034014
| false
| 0
| 0.054422
| 0
| 0.122449
| 0.013605
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ac16994f053988d4add08873e022a2c2ce12964
| 5,055
|
py
|
Python
|
robo/fmin/entropy_search.py
|
fuhuifang/RoBo
|
036bbaa0e59032577e2611d8ba304384b397c7f6
|
[
"BSD-3-Clause"
] | null | null | null |
robo/fmin/entropy_search.py
|
fuhuifang/RoBo
|
036bbaa0e59032577e2611d8ba304384b397c7f6
|
[
"BSD-3-Clause"
] | null | null | null |
robo/fmin/entropy_search.py
|
fuhuifang/RoBo
|
036bbaa0e59032577e2611d8ba304384b397c7f6
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import george
import numpy as np
from robo.priors.default_priors import DefaultPrior
from robo.models.gaussian_process import GaussianProcess
from robo.models.gaussian_process_mcmc import GaussianProcessMCMC
from robo.maximizers.random_sampling import RandomSampling
from robo.maximizers.scipy_optimizer import SciPyOptimizer
from robo.maximizers.differential_evolution import DifferentialEvolution
from robo.solver.bayesian_optimization import BayesianOptimization
from robo.acquisition_functions.information_gain import InformationGain
from robo.acquisition_functions.ei import EI
from robo.acquisition_functions.marginalization import MarginalizationGPMCMC
from robo.initial_design import init_latin_hypercube_sampling
logger = logging.getLogger(__name__)
def entropy_search(objective_function, lower, upper, num_iterations=30,
maximizer="random", model="gp_mcmc",
n_init=3, output_path=None, rng=None):
"""
Entropy search for global black box optimization problems. This is a reimplementation of the entropy search
algorithm by Hennig and Schuler [1].
[1] Entropy search for information-efficient global optimization.
P. Hennig and C. Schuler.
JMLR, (1), 2012.
Parameters
----------
objective_function: function
The objective function that is minimized. This function gets a numpy array (D,) as input and returns
the function value (scalar)
lower: np.ndarray (D,)
The lower bound of the search space
upper: np.ndarray (D,)
The upper bound of the search space
num_iterations: int
The number of iterations (initial design + BO)
maximizer: {"random", "scipy", "differential_evolution"}
Defines how the acquisition function is maximized.
model: {"gp", "gp_mcmc"}
The model for the objective function.
n_init: int
Number of points for the initial design. Make sure that it is <= num_iterations.
output_path: string
Specifies the path where the intermediate output after each iteration will be saved.
If None no output will be saved to disk.
rng: numpy.random.RandomState
Random number generator
Returns
-------
dict with all results
"""
assert upper.shape[0] == lower.shape[0], "Dimension mismatch"
assert np.all(lower < upper), "Lower bound >= upper bound"
assert n_init <= num_iterations, "Number of initial design points has to be <= the number of iterations"
if rng is None:
rng = np.random.RandomState(np.random.randint(0, 10000))
cov_amp = 2
n_dims = lower.shape[0]
initial_ls = np.ones([n_dims])
exp_kernel = george.kernels.Matern52Kernel(initial_ls,
ndim=n_dims)
kernel = cov_amp * exp_kernel
prior = DefaultPrior(len(kernel) + 1)
n_hypers = 3 * len(kernel)
if n_hypers % 2 == 1:
n_hypers += 1
if model == "gp":
gp = GaussianProcess(kernel, prior=prior, rng=rng,
normalize_output=False, normalize_input=True,
lower=lower, upper=upper)
elif model == "gp_mcmc":
gp = GaussianProcessMCMC(kernel, prior=prior,
n_hypers=n_hypers,
chain_length=200,
burnin_steps=100,
normalize_input=True,
normalize_output=False,
rng=rng, lower=lower, upper=upper)
else:
print("ERROR: %s is not a valid model!" % model)
return
a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI)
if model == "gp":
acquisition_func = a
elif model == "gp_mcmc":
acquisition_func = MarginalizationGPMCMC(a)
if maximizer == "random":
max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)
elif maximizer == "scipy":
max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)
elif maximizer == "differential_evolution":
max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng)
else:
print("ERROR: %s is not a valid function to maximize the acquisition function!" % maximizer)
return
bo = BayesianOptimization(objective_function, lower, upper, acquisition_func, gp, max_func,
initial_design=init_latin_hypercube_sampling,
initial_points=n_init, rng=rng, output_path=output_path)
x_best, f_min = bo.run(num_iterations)
results = dict()
results["x_opt"] = x_best
results["f_opt"] = f_min
results["incumbents"] = [inc for inc in bo.incumbents]
results["incumbent_values"] = [val for val in bo.incumbents_values]
results["runtime"] = bo.runtime
results["overhead"] = bo.time_overhead
results["X"] = [x.tolist() for x in bo.X]
results["y"] = [y for y in bo.y]
return results
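if __name__ == "__main__":
    # Editorial usage sketch: minimize a 1-d quadratic. The bounds and
    # budget are illustrative, not part of the original module.
    res = entropy_search(lambda x: float((x[0] - 0.3) ** 2),
                         lower=np.array([0.0]), upper=np.array([1.0]),
                         num_iterations=10, n_init=3,
                         model="gp", maximizer="random")
    print(res["x_opt"], res["f_opt"])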
| 39.492188
| 112
| 0.656775
| 616
| 5,055
| 5.246753
| 0.314935
| 0.027228
| 0.016708
| 0.02599
| 0.083849
| 0.052908
| 0.043317
| 0.043317
| 0
| 0
| 0
| 0.008833
| 0.26093
| 5,055
| 127
| 113
| 39.80315
| 0.856263
| 0.233037
| 0
| 0.105263
| 0
| 0
| 0.090254
| 0.005875
| 0
| 0
| 0
| 0
| 0.039474
| 1
| 0.013158
| false
| 0
| 0.184211
| 0
| 0.236842
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ac365363d4be305aa9c1fbf0e6475792a5ae142
| 253
|
py
|
Python
|
com/bridgelabz/programs/powerof2.py
|
aashishogale/FunctionalPrograms-Python-
|
d297bdb78112ef03274a10a58efc90da27f51b14
|
[
"MIT"
] | null | null | null |
com/bridgelabz/programs/powerof2.py
|
aashishogale/FunctionalPrograms-Python-
|
d297bdb78112ef03274a10a58efc90da27f51b14
|
[
"MIT"
] | null | null | null |
com/bridgelabz/programs/powerof2.py
|
aashishogale/FunctionalPrograms-Python-
|
d297bdb78112ef03274a10a58efc90da27f51b14
|
[
"MIT"
] | null | null | null |
import sys
from com.bridgelabz.utility.Utility import Utility
class PowerOf2:
def start(self):
number=int(sys.argv[1])
print(number)
for i in Utility().powerof2(number):
print(i)
return
PowerOf2().start()
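# Editorial usage sketch (Utility.powerof2 is assumed to yield successive
# powers of two up to the given bound):
#
#     python powerof2.py 16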
| 23
| 50
| 0.624506
| 32
| 253
| 4.9375
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021505
| 0.264822
| 253
| 11
| 51
| 23
| 0.827957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.5
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ac3dcb6f4a277998e57f0001095aaf45bef6fae
| 2,256
|
py
|
Python
|
app/main.py
|
MichaelLeeman/Job_Web_Scraper
|
29205d84f1190830a77174ce8272f4f79bb3468b
|
[
"MIT"
] | null | null | null |
app/main.py
|
MichaelLeeman/Job_Web_Scraper
|
29205d84f1190830a77174ce8272f4f79bb3468b
|
[
"MIT"
] | 4
|
2020-05-25T19:54:58.000Z
|
2020-05-25T19:55:03.000Z
|
app/main.py
|
MichaelLeeman/Job_Web_Scraper
|
29205d84f1190830a77174ce8272f4f79bb3468b
|
[
"MIT"
] | 1
|
2020-07-02T13:06:52.000Z
|
2020-07-02T13:06:52.000Z
|
# This program scrapes data from job postings on the website workinstartups.com and appends it to an Excel worksheet.
import os
from datetime import datetime, timedelta
from selenium import webdriver
from app import web_scraper
from app import excel
job_list, last_date = [], None
file_path = os.path.abspath("main.py").rstrip('/app/main.py') + '//Workbooks' + "//Job_Openings.xlsx"
print("-" * 75, "-" * 75, "\n\t\t\t\t\t\t\t JOB WEB SCRAPER", "-" * 75, "-" * 75, sep="\n")
print("\n")
# If the Job_Openings workbook already exists, append only the jobs posted
# since the last scrape, determined from the date of the first job in the worksheet.
if os.path.isfile(file_path):
print("Job_Opening excel file already exists. Loading workbook.", "-" * 75, sep="\n")
workbook, worksheet = excel.load_xlsx(file_path)
last_scrape_date = excel.get_first_job_date(worksheet)
last_scrape_date = datetime.strptime(last_scrape_date, "%d-%b-%Y")
# If not, create a new workbook and append all of the jobs posted within the month
else:
print("Creating new Excel workbook.", "-" * 75, sep="\n")
current_date = datetime.today()
date_month_ago = current_date - timedelta(weeks=4.348)  # average number of weeks in a month
last_scrape_date = date_month_ago.replace(hour=0, minute=0, second=0, microsecond=0) # default to midnight
workbook, worksheet = excel.init_xlsx(worksheet_title="Job Openings")
# Open webdriver to workinstartups.com and create soup
print("Creating soup and opening Chrome webdriver", "-"*75, sep="\n")
URL = "https://workinstartups.com/job-board/jobs-in/london"
soup = web_scraper.soup_creator(URL, max_retry=1, sleep_time=0)
driver = webdriver.Chrome('./chromedriver')
driver.get(URL)
driver.find_element_by_link_text('Close').click()
# Scrape the jobs from workinstartups.com and update the worksheet with the found jobs
print("Scraping jobs from workinstartups.com. Please wait.", "-" * 75, sep="\n")
job_list = web_scraper.search_for_jobs(soup, last_scrape_date, driver)
print("Scraping finished. Updating and saving Excel workbook.", "-" * 75, sep="\n")
driver.close()
excel.update_xlsx(worksheet, job_list)
excel.save_xlsx(workbook, file_path)
print("Finished!", sep="\n")
| 47
| 116
| 0.735816
| 350
| 2,256
| 4.608571
| 0.382857
| 0.017359
| 0.022319
| 0.009919
| 0.027898
| 0.00434
| 0
| 0
| 0
| 0
| 0
| 0.014359
| 0.135638
| 2,256
| 47
| 117
| 48
| 0.812821
| 0.253103
| 0
| 0
| 0
| 0
| 0.260143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.151515
| 0
| 0.151515
| 0.242424
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ac3e6f75c6ad2e83d2f026142ba224b4bab8c20
| 2,507
|
py
|
Python
|
src/data_loader/input_data_loader.py
|
ChristopherBrix/Debona
|
f000f3d483b2cc592233d0ba2a1a0327210562c8
|
[
"BSD-2-Clause"
] | 2
|
2020-07-26T09:48:22.000Z
|
2021-09-30T01:51:13.000Z
|
src/data_loader/input_data_loader.py
|
ChristopherBrix/Debona
|
f000f3d483b2cc592233d0ba2a1a0327210562c8
|
[
"BSD-2-Clause"
] | 2
|
2022-01-13T03:56:13.000Z
|
2022-03-12T01:03:29.000Z
|
src/data_loader/input_data_loader.py
|
ChristopherBrix/Debona
|
f000f3d483b2cc592233d0ba2a1a0327210562c8
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Functions for loading input data.
Author: Patrick Henriksen <[email protected]>
"""
import os
import numpy as np
def load_img(path: str, img_nums: list, shape: tuple) -> np.array:
"""
Loads images in the human-readable format.
Args:
path:
The path to the folder with the mnist images.
img_nums:
A list with the numbers of the images we want to load.
shape:
The shape of a single image.
Returns:
The images as a MxCx28x28 numpy array.
"""
images = np.zeros((len(img_nums), *shape), dtype=float)
for idx, i in enumerate(img_nums):
file = os.path.join(path, "image" + str(i))
with open(file, "r") as f:
data = [float(pixel) for pixel in f.readlines()[0].split(",")[:-1]]
images[idx, :, :] = np.array(data).reshape(*shape)
return images
def load_mnist_human_readable(path: str, img_nums: list) -> np.array:
"""
Loads mnist images from the neurify dataset.
Args:
path:
The path to the folder with the mnist images.
img_nums:
A list with the numbers of the images we want to load.
Returns:
The images as a Mx28x28 numpy array.
"""
return load_img(path, img_nums, (28, 28))
def load_cifar10_human_readable(path: str, img_nums: list) -> np.array:
"""
Loads the Cifar10 images in human readable format.
Args:
path:
The path to the folder with the mnist images.
img_nums:
A list with the numbers of the images we want to load.
Returns:
The images as a Mx3x32x32 numpy array.
"""
return load_img(path, img_nums, (3, 32, 32))
def load_images_eran(img_csv: str = "../../resources/images/cifar10_test.csv", num_images: int = 100,
image_shape: tuple = (3, 32, 32)) -> tuple:
"""
Loads the images from the eran csv.
Args:
    img_csv:
        The path to the eran csv file.
    num_images:
        The number of images to load.
    image_shape:
        The shape of a single image.
Returns:
    images, targets
"""
images_array = np.zeros((num_images, np.prod(image_shape)), dtype=np.float32)
targets_array = np.zeros(num_images, dtype=int)
with open(img_csv, "r") as file:
for j in range(num_images):
line_arr = file.readline().split(",")
targets_array[j] = int(line_arr[0])
images_array[j] = [float(pixel) for pixel in line_arr[1:]]
return images_array.reshape((num_images, *image_shape)), targets_array
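if __name__ == "__main__":
    # Editorial usage sketch; the data paths are hypothetical.
    mnist = load_mnist_human_readable("../../resources/images/mnist", [0, 1])
    print(mnist.shape)  # (2, 28, 28)
    images, targets = load_images_eran("../../resources/images/cifar10_test.csv")
    print(images.shape, targets.shape)  # (100, 3, 32, 32) (100,)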
| 25.845361
| 101
| 0.603111
| 360
| 2,507
| 4.083333
| 0.241667
| 0.047619
| 0.022449
| 0.028571
| 0.431293
| 0.35034
| 0.35034
| 0.35034
| 0.304082
| 0.304082
| 0
| 0.025253
| 0.28919
| 2,507
| 96
| 102
| 26.114583
| 0.799663
| 0.387316
| 0
| 0
| 0
| 0
| 0.03532
| 0.028698
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0
| 0.08
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ac4b5f3fcc2b83c0b6c655a23b542fa299d00d2
| 41,041
|
py
|
Python
|
pandas/io/sql.py
|
danbirken/pandas
|
fa8a5ca1dd27c4169727070ddbdcb248002fddb4
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
pandas/io/sql.py
|
danbirken/pandas
|
fa8a5ca1dd27c4169727070ddbdcb248002fddb4
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
pandas/io/sql.py
|
danbirken/pandas
|
fa8a5ca1dd27c4169727070ddbdcb248002fddb4
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date, timedelta
import warnings
import traceback
import itertools
import re
import numpy as np
import pandas.core.common as com
from pandas.compat import lzip, map, zip, raise_with_traceback, string_types
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.tseries.tools import to_datetime
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
#------------------------------------------------------------------------------
# Helper functions
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _handle_date_column(col, format=None):
if isinstance(format, dict):
return to_datetime(col, **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, coerce=True, unit=format)
elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, coerce=True, unit=format)
else:
return to_datetime(col, coerce=True, format=format)
def _parse_date_columns(data_frame, parse_dates):
""" Force non-datetime columns to be read as such.
Supports both string formatted and integer timestamp columns
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
df_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection
params : list or tuple, optional
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
#------------------------------------------------------------------------------
#--- Deprecated tquery and uquery
def _safe_fetch(cur):
try:
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
except Exception as e: # pragma: no cover
excName = e.__class__.__name__
if excName == 'OperationalError':
return []
def tquery(sql, con=None, cur=None, retry=True):
"""
DEPRECATED. Returns list of tuples corresponding to each row in given sql
query.
If only one column selected, then plain list is returned.
To obtain the same result in the future, you can use the following:
>>> execute(sql, con, params).fetchall()
Parameters
----------
sql: string
SQL query to be executed
con: DBAPI2 connection
cur: deprecated, cursor is obtained from connection
Returns
-------
Results Iterable
"""
warnings.warn(
"tquery is depreciated, and will be removed in future versions. "
"You can use ``execute(...).fetchall()`` instead.",
FutureWarning)
cur = execute(sql, con, cur=cur)
result = _safe_fetch(cur)
if con is not None:
try:
cur.close()
con.commit()
except Exception as e:
excName = e.__class__.__name__
if excName == 'OperationalError': # pragma: no cover
print('Failed to commit, may need to restart interpreter')
else:
raise
traceback.print_exc()
if retry:
return tquery(sql, con=con, retry=False)
if result and len(result[0]) == 1:
# python 3 compat
result = list(lzip(*result)[0])
elif result is None: # pragma: no cover
result = []
return result
def uquery(sql, con=None, cur=None, retry=True, params=None):
"""
DEPRECATED. Does the same thing as tquery, but instead of returning results, it
returns the number of rows affected. Good for update queries.
To obtain the same result in the future, you can use the following:
>>> execute(sql, con).rowcount
Parameters
----------
sql: string
SQL query to be executed
con: DBAPI2 connection
cur: deprecated, cursor is obtained from connection
params: list or tuple, optional
List of parameters to pass to execute method.
Returns
-------
Number of affected rows
"""
warnings.warn(
"uquery is depreciated, and will be removed in future versions. "
"You can use ``execute(...).rowcount`` instead.",
FutureWarning)
cur = execute(sql, con, cur=cur, params=params)
result = cur.rowcount
try:
con.commit()
except Exception as e:
excName = e.__class__.__name__
if excName != 'OperationalError':
raise
traceback.print_exc()
if retry:
print('Looks like your connection failed, reconnecting...')
return uquery(sql, con, retry=False)
return result
#------------------------------------------------------------------------------
#--- Read and write to DataFrames
def read_sql_table(table_name, con, index_col=None, coerce_float=True,
parse_dates=None, columns=None):
"""Read SQL database table into a DataFrame.
Given a table name and an SQLAlchemy engine, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy engine
Sqlite DBAPI connection mode not supported
index_col : string, optional
Column to set as index
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list
List of column names to select from sql table
Returns
-------
DataFrame
See also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql
"""
pandas_sql = PandasSQLAlchemy(con)
table = pandas_sql.read_table(
table_name, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None):
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string
SQL query to be executed
con : SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string, optional
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional
List of parameters to pass to execute method.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
Returns
-------
DataFrame
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_sql(
sql, index_col=index_col, params=params, coerce_float=coerce_float,
parse_dates=parse_dates)
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None):
"""
Read SQL query or database table into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed or database table name.
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string, optional
column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional
List of parameters to pass to execute method.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list
List of column names to select from sql table (only used when reading
a table).
Returns
-------
DataFrame
Notes
-----
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (and for backward compatibility) and will delegate
to the specific function depending on the provided input (database
table name or sql query).
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql_query : Read SQL query into a DataFrame
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, PandasSQLLegacy):
return pandas_sql.read_sql(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates)
if pandas_sql.has_table(sql):
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns)
else:
return pandas_sql.read_sql(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates)
def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
index_label=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
con : SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
"""
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
pandas_sql = pandasSQL_builder(con, flavor=flavor)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label)
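# Editorial usage sketch (sqlite3 in-memory database; the frame contents are
# made up):
#
#     import sqlite3
#     conn = sqlite3.connect(':memory:')
#     to_sql(DataFrame({'a': [1, 2]}), 'demo', conn, flavor='sqlite')
#     read_sql('SELECT * FROM demo', conn)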
def has_table(table_name, con, flavor='sqlite'):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table
con: SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor: {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor)
return pandas_sql.has_table(table_name)
table_exists = has_table
_MYSQL_WARNING = ("The 'mysql' flavor with DBAPI connection is deprecated "
"and will be removed in future versions. "
"MySQL will be further supported with SQLAlchemy engines.")
def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
"""
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
try:
import sqlalchemy
if isinstance(con, sqlalchemy.engine.Engine):
return PandasSQLAlchemy(con, meta=meta)
else:
if flavor == 'mysql':
warnings.warn(_MYSQL_WARNING, FutureWarning)
return PandasSQLLegacy(con, flavor, is_cursor=is_cursor)
except ImportError:
if flavor == 'mysql':
warnings.warn(_MYSQL_WARNING, FutureWarning)
return PandasSQLLegacy(con, flavor, is_cursor=is_cursor)
class PandasSQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
Uses fact that table is reflected by SQLAlchemy to
do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas', index_label=None):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
if frame is not None:
# We want to write a frame
if self.pd_sql.has_table(self.name):
if if_exists == 'fail':
raise ValueError("Table '%s' already exists." % name)
elif if_exists == 'replace':
self.pd_sql.drop_table(self.name)
self.table = self._create_table_statement()
self.create()
elif if_exists == 'append':
self.table = self.pd_sql.get_table(self.name)
if self.table is None:
self.table = self._create_table_statement()
else:
raise ValueError(
"'{0}' is not valid for if_exists".format(if_exists))
else:
self.table = self._create_table_statement()
self.create()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.pd_sql.has_table(self.name)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table))
def create(self):
self.table.create()
def insert_statement(self):
return self.table.insert()
def maybe_asscalar(self, i):
try:
return np.asscalar(i)
except AttributeError:
return i
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(
"duplicate name in index/columns: {0}".format(err))
else:
temp = self.frame
return temp
def insert(self):
ins = self.insert_statement()
data_list = []
temp = self.insert_data()
keys = temp.columns
for t in temp.itertuples():
data = dict((k, self.maybe_asscalar(v))
for k, v in zip(keys, t[1:]))
data_list.append(data)
self.pd_sql.execute(ins, data_list)
def read(self, coerce_float=True, parse_dates=None, columns=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
[cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]]
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
data = result.fetchall()
column_names = result.keys()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
"levels, which is {0}".format(nlevels))
else:
return index_label
# return the used column labels for the index columns
if nlevels == 1 and 'index' not in self.frame.columns and self.frame.index.name is None:
return ['index']
else:
return [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(self.frame.index.names)]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, string_types):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _create_table_statement(self):
from sqlalchemy import Table, Column
columns = list(map(str, self.frame.columns))
column_types = map(self._sqlalchemy_type, self.frame.dtypes)
columns = [Column(name, typ)
for name, typ in zip(columns, column_types)]
if self.index is not None:
for i, idx_label in enumerate(self.index[::-1]):
idx_type = self._sqlalchemy_type(
self.frame.index.get_level_values(i))
columns.insert(0, Column(idx_label, idx_type, index=True))
return Table(self.name, self.pd_sql.meta, *columns)
def _harmonize_columns(self, parse_dates=None):
""" Make a data_frame's column type align with an sql_table
column types
Need to work around limited NA value support.
Floats are always fine, ints must always
be floats if there are Null values.
Booleans are hard because converting bool column with None replaces
all Nones with false. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted
to np.datetime if supported, but here we also force conversion
if required
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# the type the dataframe column should have
col_type = self._numpy_type(sql_col.type)
if col_type is datetime or col_type is date:
if not issubclass(df_col.dtype.type, np.datetime64):
self.frame[col_name] = _handle_date_column(df_col)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is int or col_type is bool:
self.frame[col_name] = df_col.astype(col_type, copy=False)
# Handle date parsing
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
except KeyError:
pass # this column not in results
def _sqlalchemy_type(self, arr_or_dtype):
from sqlalchemy.types import Integer, Float, Text, Boolean, DateTime, Date, Interval
if arr_or_dtype is date:
return Date
if com.is_datetime64_dtype(arr_or_dtype):
try:
tz = arr_or_dtype.tzinfo
return DateTime(timezone=True)
except AttributeError:
return DateTime
if com.is_timedelta64_dtype(arr_or_dtype):
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning)
return Integer
elif com.is_float_dtype(arr_or_dtype):
return Float
elif com.is_integer_dtype(arr_or_dtype):
# TODO: Refine integer size.
return Integer
elif com.is_bool(arr_or_dtype):
return Boolean
return Text
def _numpy_type(self, sqltype):
from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date
if isinstance(sqltype, Float):
return float
if isinstance(sqltype, Integer):
# TODO: Refine integer size.
return int
if isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
if isinstance(sqltype, Date):
return date
if isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject):
"""
Subclasses should define read_sql and to_sql.
"""
def read_sql(self, *args, **kwargs):
raise ValueError(
"PandasSQL must be created with an SQLAlchemy engine or connection+sql flavor")
def to_sql(self, *args, **kwargs):
raise ValueError(
"PandasSQL must be created with an SQLAlchemy engine or connection+sql flavor")
class PandasSQLAlchemy(PandasSQL):
"""
This class enables conversion between DataFrame and SQL databases,
using SQLAlchemy to handle the database abstraction.
"""
def __init__(self, engine, meta=None):
self.engine = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.engine)
meta.reflect(self.engine)
self.meta = meta
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy engine"""
return self.engine.execute(*args, **kwargs)
def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None):
table = PandasSQLTable(table_name, self, index=index_col)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns)
def read_sql(self, sql, index_col=None, coerce_float=True,
parse_dates=None, params=None):
args = _convert_params(sql, params)
result = self.execute(*args)
data = result.fetchall()
columns = result.keys()
data_frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
_parse_date_columns(data_frame, parse_dates)
if index_col is not None:
data_frame.set_index(index_col, inplace=True)
return data_frame
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None):
table = PandasSQLTable(
name, self, frame=frame, index=index, if_exists=if_exists,
index_label=index_label)
table.insert()
@property
def tables(self):
return self.meta.tables
def has_table(self, name):
if self.meta.tables.get(name) is not None:
return True
else:
return False
def get_table(self, table_name):
return self.meta.tables.get(table_name)
def drop_table(self, table_name):
if self.engine.has_table(table_name):
self.get_table(table_name).drop()
self.meta.clear()
self.meta.reflect()
def _create_sql_schema(self, frame, table_name):
table = PandasSQLTable(table_name, self, frame=frame)
return str(table.sql_schema())
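# Illustrative sketch (not part of the original module), assuming a SQLAlchemy
# version contemporary with this code (bound MetaData / Engine.execute): a minimal
# round trip through PandasSQLAlchemy. The _demo_ name is hypothetical.
def _demo_sqlalchemy_roundtrip():
    from sqlalchemy import create_engine
    engine = create_engine('sqlite:///:memory:')
    pd_sql = PandasSQLAlchemy(engine)
    pd_sql.to_sql(DataFrame({'a': [1, 2], 'b': [0.5, 1.5]}), 'demo', index=False)
    # read_table reflects the freshly created table and returns it as a DataFrame
    return pd_sql.read_table('demo')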
# ---- SQL without SQLAlchemy ---
# Flavor-specific SQL strings and handler class for access to DBs without
# SQLAlchemy installed
# SQL type conversions for each DB
_SQL_TYPES = {
'text': {
'mysql': 'VARCHAR (63)',
'sqlite': 'TEXT',
},
'float': {
'mysql': 'FLOAT',
'sqlite': 'REAL',
},
'int': {
'mysql': 'BIGINT',
'sqlite': 'INTEGER',
},
'datetime': {
'mysql': 'DATETIME',
'sqlite': 'TIMESTAMP',
},
'date': {
'mysql': 'DATE',
'sqlite': 'TIMESTAMP',
},
'bool': {
'mysql': 'BOOLEAN',
'sqlite': 'INTEGER',
}
}
# SQL quoting and wildcard symbols
_SQL_SYMB = {
'mysql': {
'br_l': '`',
'br_r': '`',
'wld': '%s'
},
'sqlite': {
'br_l': '[',
'br_r': ']',
'wld': '?'
}
}
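# Illustrative sketch (not part of the original module): how the quoting and wildcard
# symbols above translate into flavor-specific SQL fragments (hypothetical helper).
def _demo_insert_templates():
    out = {}
    for flv in ('sqlite', 'mysql'):
        s = _SQL_SYMB[flv]
        out[flv] = 'INSERT INTO demo (%s%s%s) VALUES (%s)' % (s['br_l'], 'col', s['br_r'], s['wld'])
    # {'sqlite': 'INSERT INTO demo ([col]) VALUES (?)',
    #  'mysql': 'INSERT INTO demo (`col`) VALUES (%s)'}
    return out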
_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to "
"underscores.")
class PandasSQLTableLegacy(PandasSQLTable):
"""Patch the PandasSQLTable for legacy support.
Instead of a table variable, just use the CREATE TABLE
statement.
def sql_schema(self):
return str(self.table)
def create(self):
self.pd_sql.execute(self.table)
def insert_statement(self):
names = list(map(str, self.frame.columns))
flv = self.pd_sql.flavor
br_l = _SQL_SYMB[flv]['br_l'] # left val quote char
br_r = _SQL_SYMB[flv]['br_r'] # right val quote char
wld = _SQL_SYMB[flv]['wld'] # wildcard char
if self.index is not None:
for idx in self.index[::-1]:
    names.insert(0, idx)
bracketed_names = [br_l + column + br_r for column in names]
col_names = ','.join(bracketed_names)
wildcards = ','.join([wld] * len(names))
insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
self.name, col_names, wildcards)
return insert_statement
def insert(self):
ins = self.insert_statement()
temp = self.insert_data()
data_list = []
for t in temp.itertuples():
data = tuple((self.maybe_asscalar(v) for v in t[1:]))
data_list.append(data)
cur = self.pd_sql.con.cursor()
cur.executemany(ins, data_list)
cur.close()
self.pd_sql.con.commit()
def _create_table_statement(self):
"Return a CREATE TABLE statement to suit the contents of a DataFrame."
columns = list(map(str, self.frame.columns))
pat = re.compile(r'\s+')
if any(map(pat.search, columns)):
warnings.warn(_SAFE_NAMES_WARNING)
column_types = [self._sql_type_name(typ) for typ in self.frame.dtypes]
if self.index is not None:
for i, idx_label in enumerate(self.index[::-1]):
columns.insert(0, idx_label)
column_types.insert(0, self._sql_type_name(self.frame.index.get_level_values(i).dtype))
flv = self.pd_sql.flavor
br_l = _SQL_SYMB[flv]['br_l'] # left val quote char
br_r = _SQL_SYMB[flv]['br_r'] # right val quote char
col_template = br_l + '%s' + br_r + ' %s'
columns = ',\n '.join(col_template %
x for x in zip(columns, column_types))
template = """CREATE TABLE %(name)s (
%(columns)s
)"""
create_statement = template % {'name': self.name, 'columns': columns}
return create_statement
def _sql_type_name(self, dtype):
pytype = dtype.type
pytype_name = "text"
if issubclass(pytype, np.floating):
pytype_name = "float"
elif com.is_timedelta64_dtype(pytype):
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning)
pytype_name = "int"
elif issubclass(pytype, np.integer):
pytype_name = "int"
elif issubclass(pytype, np.datetime64) or pytype is datetime:
# Caution: np.datetime64 is also a subclass of np.number.
pytype_name = "datetime"
elif pytype is date:
pytype_name = "date"
elif issubclass(pytype, np.bool_):
pytype_name = "bool"
return _SQL_TYPES[pytype_name][self.pd_sql.flavor]
class PandasSQLLegacy(PandasSQL):
def __init__(self, con, flavor, is_cursor=False):
self.is_cursor = is_cursor
self.con = con
if flavor is None:
flavor = 'sqlite'
if flavor not in ['sqlite', 'mysql']:
raise NotImplementedError("flavors other than 'sqlite' and 'mysql' are not supported")
else:
self.flavor = flavor
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
if kwargs:
cur.execute(*args, **kwargs)
else:
cur.execute(*args)
return cur
except Exception as e:
try:
self.con.rollback()
except Exception: # pragma: no cover
ex = DatabaseError(
"Execution failed on sql: %s\n%s\nunable to rollback" % (args[0], e))
raise_with_traceback(ex)
ex = DatabaseError("Execution failed on sql: %s" % args[0])
raise_with_traceback(ex)
def read_sql(self, sql, index_col=None, coerce_float=True, params=None,
parse_dates=None):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
data = self._fetchall_as_list(cursor)
cursor.close()
data_frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
_parse_date_columns(data_frame, parse_dates)
if index_col is not None:
data_frame.set_index(index_col, inplace=True)
return data_frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: name of SQL table
if_exists: {'fail', 'replace', 'append'}, default 'fail'
fail: If table exists, do nothing.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if does not exist.
"""
table = PandasSQLTableLegacy(
name, self, frame=frame, index=index, if_exists=if_exists,
index_label=index_label)
table.insert()
def has_table(self, name):
flavor_map = {
'sqlite': ("SELECT name FROM sqlite_master "
"WHERE type='table' AND name='%s';") % name,
'mysql': "SHOW TABLES LIKE '%s'" % name}
query = flavor_map.get(self.flavor)
return len(self.execute(query).fetchall()) > 0
def get_table(self, table_name):
return None # not supported in Legacy mode
def drop_table(self, name):
drop_sql = "DROP TABLE %s" % name
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name):
table = PandasSQLTableLegacy(table_name, self, frame=frame)
return str(table.sql_schema())
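# Illustrative sketch (not part of the original module): a round trip through
# PandasSQLLegacy using the sqlite3 standard-library driver. The _demo_ name is
# hypothetical.
def _demo_legacy_roundtrip():
    import sqlite3
    con = sqlite3.connect(':memory:')
    pd_sql = PandasSQLLegacy(con, flavor='sqlite')
    pd_sql.to_sql(DataFrame({'a': [1, 2], 'b': [0.1, 0.2]}), 'demo', index=False)
    return pd_sql.read_sql("SELECT * FROM demo")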
def get_schema(frame, name, flavor='sqlite', keys=None, con=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will continue to be supported through SQLAlchemy engines.
keys : string or sequence
columns to use as primary key
con: an open SQL database connection object or an SQLAlchemy engine
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
"""
if con is None:
if flavor == 'mysql':
warnings.warn(_MYSQL_WARNING, FutureWarning)
return _get_schema_legacy(frame, name, flavor, keys)
pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
return pandas_sql._create_sql_schema(frame, name)
def _get_schema_legacy(frame, name, flavor, keys=None):
"""Old function from 0.13.1. To keep backwards compatibility.
When mysql legacy support is dropped, it should be possible to
remove this code
"""
def get_sqltype(dtype, flavor):
pytype = dtype.type
pytype_name = "text"
if issubclass(pytype, np.floating):
pytype_name = "float"
elif issubclass(pytype, np.integer):
pytype_name = "int"
elif issubclass(pytype, np.datetime64) or pytype is datetime:
# Caution: np.datetime64 is also a subclass of np.number.
pytype_name = "datetime"
elif pytype is date:
pytype_name = "date"
elif issubclass(pytype, np.bool_):
pytype_name = "bool"
return _SQL_TYPES[pytype_name][flavor]
lookup_type = lambda dtype: get_sqltype(dtype, flavor)
column_types = lzip(frame.dtypes.index, map(lookup_type, frame.dtypes))
if flavor == 'sqlite':
columns = ',\n '.join('[%s] %s' % x for x in column_types)
else:
columns = ',\n '.join('`%s` %s' % x for x in column_types)
keystr = ''
if keys is not None:
if isinstance(keys, string_types):
keys = (keys,)
keystr = ', PRIMARY KEY (%s)' % ','.join(keys)
template = """CREATE TABLE %(name)s (
%(columns)s
%(keystr)s
);"""
create_statement = template % {'name': name, 'columns': columns,
'keystr': keystr}
return create_statement
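# Illustrative usage sketch (hypothetical helper, not part of the original module):
# without a connection, get_schema falls back to the legacy DDL generator above,
# so a one-column integer frame with 'a' as key yields roughly
# "CREATE TABLE demo ( [a] INTEGER , PRIMARY KEY (a) );" for the sqlite flavor.
def _demo_get_schema():
    return get_schema(DataFrame({'a': [1]}), name='demo', flavor='sqlite', keys='a')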
# legacy names, with deprecation warnings and copied docs
def read_frame(*args, **kwargs):
"""DEPRECIATED - use read_sql
"""
warnings.warn("read_frame is depreciated, use read_sql", FutureWarning)
return read_sql(*args, **kwargs)
def frame_query(*args, **kwargs):
"""DEPRECIATED - use read_sql
"""
warnings.warn("frame_query is depreciated, use read_sql", FutureWarning)
return read_sql(*args, **kwargs)
def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs):
"""DEPRECIATED - use to_sql
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
con : DBAPI2 connection
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default False
Write DataFrame index as a column
Notes
-----
This function is deprecated in favor of ``to_sql``. There are however
two differences:
- With ``to_sql`` the index is written to the SQL database by default. To
keep the behaviour of this function you need to specify ``index=False``.
- The new ``to_sql`` function supports SQLAlchemy engines, which makes it
possible to work with different SQL flavors.
See also
--------
pandas.DataFrame.to_sql
"""
warnings.warn("write_frame is depreciated, use to_sql", FutureWarning)
# for backwards compatibility, set index=False when not specified
index = kwargs.pop('index', False)
return to_sql(frame, name, con, flavor=flavor, if_exists=if_exists,
index=index, **kwargs)
# Append wrapped function docstrings
read_frame.__doc__ += read_sql.__doc__
frame_query.__doc__ += read_sql.__doc__
| 33.806425
| 103
| 0.608002
| 5,104
| 41,041
| 4.752351
| 0.104232
| 0.020201
| 0.006308
| 0.007421
| 0.516738
| 0.48421
| 0.45061
| 0.415485
| 0.393758
| 0.37331
| 0
| 0.002747
| 0.299213
| 41,041
| 1,213
| 104
| 33.834295
| 0.840618
| 0.313686
| 0
| 0.401244
| 0
| 0
| 0.089053
| 0.001944
| 0
| 0
| 0
| 0.001649
| 0
| 1
| 0.093313
| false
| 0.004666
| 0.032659
| 0.009331
| 0.253499
| 0.007776
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ac61484010824f5bc86d5e3f43da1576d3d9bbb
| 4,411
|
py
|
Python
|
Systerm/meta.py
|
ZytroCode/Systerm
|
688b1a9eab51ec2d2fcc8e921d57ae4ae585a1b7
|
[
"MIT"
] | 1
|
2022-03-01T02:36:29.000Z
|
2022-03-01T02:36:29.000Z
|
Systerm/meta.py
|
ZytroCode/Systerm
|
688b1a9eab51ec2d2fcc8e921d57ae4ae585a1b7
|
[
"MIT"
] | 1
|
2022-03-04T03:20:50.000Z
|
2022-03-04T03:20:50.000Z
|
Systerm/meta.py
|
ZytroCode/Systerm
|
688b1a9eab51ec2d2fcc8e921d57ae4ae585a1b7
|
[
"MIT"
] | null | null | null |
"""Meta is a module contains objects that will customize the behavior of python."""
from abc import ABC
from abc import ABCMeta
from abc import abstractmethod
from typing import Any
from typing import Callable
import Systerm
# Metaclass
class Metaclass(ABCMeta):
"""A metaclass to customize the behavior of all classes."""
def __new__(self, name: str, bases: tuple[type, ...], attrs: dict[str, Any], **keys: Any) -> type:
"""The static constructor for the Metaclass.
Parameters:
name - str The name of the class
bases - tuple[type, ...] A tuple of classes to inherit
attrs - dict[str, Any] A dictionary of attributes
**keys - Any Keyword arguments to pass in
"""
# Creating a new class
cls = super().__new__(self, name, bases, dict(attrs), **keys)
cls.__setattr__ = self.setattr
# Custom magic methods
cls.__namespaces__ = {}
cls.__magics__ = {}
cls.__attributes__ = {}
cls.__publics__ = {}
cls.__privates__ = {}
cls.__protecteds__ = {}
# Setting objects
for name in dir(cls):
value = getattr(cls, name)
# Adds attributes to __magics__
if name.startswith("__") and name.endswith("__"):
cls.__magics__[name] = value
# Adds attributes to other namespace
else:
# Adds attributes to __privates__
if name.startswith("__"):
cls.__privates__[name] = value
# Adds attributes to __protecteds__
elif name.startswith("_"):
cls.__protecteds__[name] = value
# Adds attributes to __publics__
else:
cls.__publics__[name] = value
cls.__attributes__[name] = value
# Adds attributes to namespace
cls.__namespaces__[name] = value
return cls
def setattr(self, name: str, value: object) -> None:
# Adds attributes to __magics__
if name.startswith("__") and name.endswith("__"):
self.__magics__[name] = value
# Adds attributes to other namespace
else:
# Adds attributes to __privates__
if name.startswith("__"):
self.__privates__[name] = value
# Adds attributes to __protecteds__
elif name.startswith("_"):
self.__protecteds__[name] = value
# Adds attributes to __publics__
else:
self.__publics__[name] = value
self.__attributes__[name] = value
# Adds attributes to namespace
self.__namespaces__[name] = value
# Object class
class Object(object, metaclass=Metaclass):
pass
# List class
class List(list, metaclass=Metaclass):
pass
# Dictionary class
class Dictionary(dict, metaclass=Metaclass):
def __getattr__(self, name: str) -> None:
try:
return self[name]
except KeyError as e:
try:
return super().__getattr__(name)
except AttributeError:
raise e
def __setattr__(self, name: str, value: object) -> None:
self[name] = value
# Recreating ABC
ABC = Metaclass(ABC.__name__, ABC.__bases__, {name: getattr(ABC, name) for name in dir(ABC)})
def get_namespaces(object: Object) -> Dictionary:
"""Gets the namespaces of an object."""
return object.__namespaces__
def get_magics(object: Object) -> Dictionary:
"""Gets the magic methods of an object."""
return object.__magics__
def get_attributes(object: Object) -> Dictionary:
"""Gets the attributes of an object."""
return object.__attributes__
def get_publics(object: Object) -> Dictionary:
"""Gets the public namespaces of an object."""
return object.__publics__
def get_privates(object: Object) -> Dictionary:
"""Gets the private namespaces of an object."""
return object.__privates__
def get_protecteds(object: Object) -> Dictionary:
"""Gets the protected namespaces of an object."""
return object.__protecteds__
# Initializing Systerm.module
from Systerm._setup import init_module
module = init_module()
# MetaMod class
class MetaMod(module.Module):
pass
module.modules[__name__].__class__ = MetaMod
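# Illustrative sketch (not part of the original module): how the Metaclass buckets
# attributes by naming convention. The _Demo* names are hypothetical; note that a
# leading-double-underscore attribute written in a class body is name-mangled by
# Python before the metaclass sees it, so it lands in __protecteds__ rather than
# __privates__.
def _demo_namespace_buckets():
    class _DemoConfig(Object):
        retries = 3          # public -> __publics__
        _timeout = 5.0       # single underscore -> __protecteds__
        def ping(self):
            return "pong"
    assert "retries" in get_publics(_DemoConfig)
    assert "_timeout" in get_protecteds(_DemoConfig)
    assert "__init__" in get_magics(_DemoConfig)
    return get_namespaces(_DemoConfig)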
| 30.42069
| 102
| 0.608025
| 472
| 4,411
| 5.277542
| 0.205508
| 0.046969
| 0.077077
| 0.073866
| 0.403051
| 0.315536
| 0.264151
| 0.228824
| 0.162987
| 0.162987
| 0
| 0
| 0.301519
| 4,411
| 144
| 103
| 30.631944
| 0.808504
| 0.272727
| 0
| 0.2
| 0
| 0
| 0.004528
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0.04
| 0.093333
| 0
| 0.413333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ac6cf77a3b421f63bd83476f536c84c12d3066c
| 11,859
|
py
|
Python
|
samples/apps/txregulator/tests/txregulatorclient.py
|
iqsarv/CCF
|
5cc33a1f0e06eb2a25dc1ebd0e2153881962b889
|
[
"Apache-2.0"
] | 1
|
2020-02-03T21:57:22.000Z
|
2020-02-03T21:57:22.000Z
|
samples/apps/txregulator/tests/txregulatorclient.py
|
kuychaco/CCF
|
e11acde3be6a7d2213fe5b406b959bb5bb64361d
|
[
"Apache-2.0"
] | null | null | null |
samples/apps/txregulator/tests/txregulatorclient.py
|
kuychaco/CCF
|
e11acde3be6a7d2213fe5b406b959bb5bb64361d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import infra.e2e_args
import infra.ccf
import infra.jsonrpc
import logging
from time import gmtime, strftime
import csv
import random
from loguru import logger as LOG
class AppUser:
def __init__(self, network, name, country, curve):
self.name = name
self.country = country
primary, _ = network.find_primary()
network.create_users([self.name], curve)
network.consortium.add_users(primary, [self.name])
with primary.user_client(user_id=self.name) as client:
self.ccf_id = client.rpc("whoAmI", {}).result["caller_id"]
def __str__(self):
return f"{self.ccf_id} ({self.name})"
def run(args):
hosts = ["localhost"]
with infra.ccf.network(
hosts, args.build_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
) as network:
check = infra.checker.Checker()
network.start_and_join(args)
primary, others = network.find_nodes()
script = "if tonumber(amt) > 200000 then return true else return false end"
if args.lua_script is not None:
data = []
with open(args.lua_script, "r") as f:
data = f.readlines()
script = "".join(data)
manager = AppUser(network, "manager", "GB", args.default_curve)
regulator = AppUser(network, "auditor", "GB", args.default_curve)
banks = [
AppUser(network, f"bank{country}", country, args.default_curve)
for country in ("US", "GB", "GR", "FR")
]
transactions = []
with open(args.datafile, newline="") as f:
datafile = csv.DictReader(f)
for i, row in enumerate(datafile):
# read first 10 lines
if i > 10:
break
json_tx = {
"src": row["origin"],
"dst": row["destination"],
"amt": row["amount"],
"type": row["type"],
"timestamp": strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()),
"src_country": row["src_country"],
"dst_country": row["dst_country"],
}
transactions.append(json_tx)
# Manager is granted special privileges by members, which are later read by the app to enforce access restrictions
proposal_result, error = network.consortium.propose(
0,
primary,
f"""
return Calls:call(
"set_user_data",
{{
user_id = {manager.ccf_id},
user_data = {{
privileges = {{
REGISTER_REGULATORS = true,
REGISTER_BANKS = true,
}}
}}
}}
)
""",
)
network.consortium.vote_using_majority(primary, proposal_result["id"])
# Check permissions are enforced
with primary.user_client(user_id=regulator.name) as c:
check(
c.rpc("REG_register", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
check(
c.rpc("BK_register", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
with primary.user_client(user_id=banks[0].name) as c:
check(
c.rpc("REG_register", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
check(
c.rpc("BK_register", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
# As permissioned manager, register regulator and banks
with primary.node_client() as mc:
check_commit = infra.checker.Checker(mc)
with primary.user_client(format="msgpack", user_id=manager.name) as c:
check(
c.rpc(
"REG_register",
{
"regulator_id": regulator.ccf_id,
"country": regulator.country,
"script": script,
},
),
result=regulator.ccf_id,
)
check(
c.rpc("REG_get", {"id": regulator.ccf_id}),
result=[regulator.country, script],
)
check(
c.rpc(
"BK_register",
{"bank_id": regulator.ccf_id, "country": regulator.country},
),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
LOG.debug(f"User {regulator} successfully registered as regulator")
for bank in banks:
check(
c.rpc(
"BK_register",
{"bank_id": bank.ccf_id, "country": bank.country},
),
result=bank.ccf_id,
)
check(c.rpc("BK_get", {"id": bank.ccf_id}), result=bank.country)
check(
c.rpc(
"REG_register",
{"regulator_id": bank.ccf_id, "country": bank.country},
),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
LOG.debug(f"User {bank} successfully registered as bank")
LOG.success(f"{1} regulator and {len(banks)} bank(s) successfully setup")
tx_id = 0 # Tracks how many transactions have been issued
# track flagged/non-flagged and revealed/non-revealed transactions for validation
flagged_txs = {}
revealed_tx_ids = []
flagged_ids = []
non_flagged_ids = []
flagged_amt = 200000
for i, bank in enumerate(banks):
with primary.user_client(format="msgpack", user_id=bank.name) as c:
# Destination account is the next one in the list of banks
for transaction in transactions:
print(transaction)
amount = transaction["amt"]
check(c.rpc("TX_record", transaction), result=tx_id)
check(
c.rpc("TX_get", {"tx_id": tx_id}),
result={
"amt": amount,
"bank_id": bank.ccf_id,
"dst": transaction["dst"],
"dst_country": transaction["dst_country"],
"src": transaction["src"],
"src_country": transaction["src_country"],
"timestamp": transaction["timestamp"],
"type": transaction["type"],
},
)
if float(amount) > flagged_amt:
check(
c.rpc("FLAGGED_TX_get", {"tx_id": tx_id}),
result=[regulator.ccf_id, False, transaction["timestamp"]],
)
flagged_tx = {
"amt": amount,
"bank_id": bank.ccf_id,
"dst": transaction["dst"],
"dst_country": transaction["dst_country"],
"src": transaction["src"],
"src_country": transaction["src_country"],
"timestamp": transaction["timestamp"],
"tx_id": tx_id,
"type": transaction["type"],
}
flagged_ids.append(tx_id)
flagged_txs[tx_id] = flagged_tx
else:
check(
c.rpc("FLAGGED_TX_get", {"tx_id": tx_id}),
error=lambda e: e is not None
and e["code"]
== infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
non_flagged_ids.append(tx_id)
tx_id += 1
LOG.success(f"{tx_id} transactions have been successfully issued")
# bank that issued first flagged transaction
with primary.user_client(format="msgpack", user_id=bank.name) as c:
# trying to poll flagged transactions fails, as the bank is not a regulator
check(
c.rpc("REG_poll_flagged", {}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value,
)
# the bank reveals some of the transactions that were flagged
for i, tx_id in enumerate(flagged_ids):
if i % 2 == 0:
check(c.rpc("TX_reveal", {"tx_id": tx_id}), result=True)
revealed_tx_ids.append(tx_id)
# the bank tries to reveal non-flagged txs, which fails
for tx_id in non_flagged_ids:
check(
c.rpc("TX_reveal", {"tx_id": tx_id}),
error=lambda e: e is not None
and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
# the regulator polls for transactions that are flagged
with primary.node_client() as mc:
with primary.user_client(format="msgpack", user_id=regulator.name) as c:
# assert that the flagged txs that we poll for are correct
resp = c.rpc("REG_poll_flagged", {})
poll_flagged_ids = []
for poll_flagged in resp.result:
# poll flagged is a list [tx_id, regulator_id]
poll_flagged_ids.append(poll_flagged[0])
poll_flagged_ids.sort()
assert poll_flagged_ids == flagged_ids
for tx_id in flagged_ids:
# get from flagged txs, try to get the flagged one that was not revealed
if tx_id not in revealed_tx_ids:
check(
c.rpc("REG_get_revealed", {"tx_id": tx_id}),
error=lambda e: e is not None
and e["code"]
== infra.jsonrpc.ErrorCode.INVALID_PARAMS.value,
)
# get from flagged txs, try to get the flagged ones that were revealed
for tx_id in revealed_tx_ids:
check(
c.rpc("REG_get_revealed", {"tx_id": tx_id}),
result=flagged_txs[tx_id],
)
if __name__ == "__main__":
def add(parser):
parser.add_argument(
"--lua-script", help="Regulator checker loaded as lua script file", type=str
)
parser.add_argument(
"--datafile", help="Load an existing scenario file (csv)", type=str
)
args = infra.e2e_args.cli_args(add)
args.package = args.app_script and "libluageneric" or "liblogging"
run(args)
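# Illustrative sketch (not part of the original test): the default regulator script
# above flags a transaction when its amount exceeds 200000, which is what the
# assertions in run() mirror via flagged_amt. The helper name is hypothetical.
def _is_flagged(amount, threshold=200000):
    return float(amount) > threshold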
| 39.795302
| 117
| 0.474239
| 1,204
| 11,859
| 4.498339
| 0.192691
| 0.022895
| 0.031573
| 0.024003
| 0.391433
| 0.368907
| 0.340473
| 0.297821
| 0.292836
| 0.256647
| 0
| 0.004714
| 0.427523
| 11,859
| 297
| 118
| 39.929293
| 0.793048
| 0.081457
| 0
| 0.341564
| 0
| 0
| 0.149991
| 0
| 0
| 0
| 0
| 0
| 0.004115
| 1
| 0.016461
| false
| 0
| 0.032922
| 0.004115
| 0.061728
| 0.004115
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ac72633419a62f181f2995c29a463e6cede8eca
| 4,925
|
py
|
Python
|
src/finmag/sim/hysteresis.py
|
davidcortesortuno/finmag
|
9ac0268d2c0e45faf1284cee52a73525aa589e2b
|
[
"BSL-1.0"
] | 10
|
2018-03-24T07:43:17.000Z
|
2022-03-26T10:42:27.000Z
|
src/finmag/sim/hysteresis.py
|
davidcortesortuno/finmag
|
9ac0268d2c0e45faf1284cee52a73525aa589e2b
|
[
"BSL-1.0"
] | 21
|
2018-03-26T15:08:53.000Z
|
2021-07-10T16:11:14.000Z
|
src/finmag/sim/hysteresis.py
|
davidcortesortuno/finmag
|
9ac0268d2c0e45faf1284cee52a73525aa589e2b
|
[
"BSL-1.0"
] | 7
|
2018-04-09T11:50:48.000Z
|
2021-06-10T09:23:25.000Z
|
import os
import re
import glob
import logging
import textwrap
import fileinput
import numpy as np
from finmag.energies import Zeeman
from finmag.util.helpers import norm
log = logging.getLogger(name="finmag")
def hysteresis(sim, H_ext_list, fun=None, **kwargs):
"""
Set the applied field to the first value in `H_ext_list` (which should
be a list of external field vectors) and then call the relax() method.
When convergence is reached, the field is changed to the next one in
H_ext_list, and so on until all values in H_ext_list are exhausted.
Note: The fields in H_ext_list are applied *in addition to* any Zeeman
interactions that are already present in the simulation.
In particular, if only one external field should be present then
do not add any Zeeman interactions before calling this method.
If you would like to perform a certain action (e.g. save a VTK
snapshot of the magnetisation) at the end of each relaxation stage,
use the sim.schedule() command with the directive 'at_end=True' as
in the following example:
sim.schedule('save_vtk', at_end=True, ...)
sim.hysteresis(...)
*Arguments*
H_ext_list: list of 3-vectors
List of external fields, where each field can have any of
the forms accepted by Zeeman.__init__() (see its docstring
for more details).
fun: callable
The user can pass a function here (which should accept the
Simulation object as its only argument); this function is
called after each relaxation and determines the return
value (see below). For example, if
fun = (lambda sim: sim.m_average[0])
then the return value is a list of values representing the
average x-component of the magnetisation at the end of
each relaxation.
All other keyword arguments are passed on to the relax() method.
See its documentation for details.
*Return value*
If `fun` is not None then the return value is a list containing an
accumulation of all the return values of `fun` after each stage.
Otherwise the return value is None.
"""
if H_ext_list == []:
return
# Add a new Zeeman interaction, initialised to zero.
H = Zeeman((0, 0, 0))
sim.add(H)
# We keep track of the current stage of the hysteresis loop.
cur_stage = 0
num_stages = len(H_ext_list)
res = []
try:
while True:
H_cur = H_ext_list[cur_stage]
log.info(
"Entering hysteresis stage #{} ({} out of {}). Current field: "
"{}".format(cur_stage, cur_stage + 1, num_stages, H_cur))
H.set_value(H_cur)
sim.relax(**kwargs)
cur_stage += 1
if fun is not None:
retval = fun(sim)
res.append(retval)
log.debug("hysteresis callback function '{}' returned "
"value: {}".format(fun.__name__, retval))
except IndexError:
log.info("Hysteresis is finished.")
log.info("Removing the applied field used for hysteresis.")
sim.remove_interaction(H.name)
return res or None
def hysteresis_loop(sim, H_max, direction, N, **kwargs):
"""
Compute a hysteresis loop. This is a specialised convenience
version of the more general `hysteresis` method. It computes a
hysteresis loop where the external field is applied along a
single axis and changes magnitude from +H_max to -H_max and
back (using N steps in each direction).
The return value is a pair (H_vals, m_vals), where H_vals is
the list of field strengths at which a relaxation is performed
and m_vals is a list of scalar values containing, for each
field value, the averaged value of the magnetisation along the
axis `direction` (after relaxation has been reached). Thus the
command plot(H_vals, m_vals) could be used to plot the
hysteresis loop.
direction -- a vector indicating the direction of the
external field (will be normalised
automatically)
H_max -- maximum field strength
N -- number of data points to compute in each direction
(thus 2*N relaxations are performed in total, with the
turning point -H_max appearing twice)
kwargs -- any keyword argument accepted by the hysteresis() method
"""
d = np.array(direction)
H_dir = d / norm(d)
H_norms = list(np.linspace(H_max, -H_max, N)) + \
list(np.linspace(-H_max, H_max, N))
H_vals = [h * H_dir for h in H_norms]
m_avg = hysteresis(sim, H_vals, fun=lambda sim: sim.m_average, **kwargs)
# projected lengths of the averaged magnetisation values along the axis
# `H_dir`
m_vals = [np.dot(m, H_dir) for m in m_avg]
return (H_norms, m_vals)
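# Illustrative sketch (not part of the original module): the field-strength schedule
# that hysteresis_loop builds before delegating to hysteresis(). The helper name is
# hypothetical. The turning point -H_max appears twice, once at the end of the
# downward sweep and once at the start of the upward sweep.
def _demo_field_schedule(H_max=1e5, N=5):
    H_norms = list(np.linspace(H_max, -H_max, N)) + list(np.linspace(-H_max, H_max, N))
    return H_norms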
| 34.929078
| 79
| 0.650355
| 724
| 4,925
| 4.325967
| 0.321823
| 0.011494
| 0.022989
| 0.012771
| 0.094828
| 0.072158
| 0.057471
| 0.041507
| 0.02682
| 0
| 0
| 0.002828
| 0.28203
| 4,925
| 140
| 80
| 35.178571
| 0.882919
| 0.622132
| 0
| 0
| 0
| 0
| 0.118047
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.195652
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ac8bc92bddd721b23be9da9373cb90b73f83f01
| 1,200
|
py
|
Python
|
core/controllers/services.py
|
willingc/oh-missions-oppia-beta
|
3d97903a5155ec67f135b1aa2c02f3bb39eb02e7
|
[
"Apache-2.0"
] | null | null | null |
core/controllers/services.py
|
willingc/oh-missions-oppia-beta
|
3d97903a5155ec67f135b1aa2c02f3bb39eb02e7
|
[
"Apache-2.0"
] | 2
|
2021-06-10T23:58:39.000Z
|
2021-12-13T20:51:34.000Z
|
core/controllers/services.py
|
willingc/oh-missions-oppia-beta
|
3d97903a5155ec67f135b1aa2c02f3bb39eb02e7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for miscellaneous services."""
__author__ = 'Tarashish Mishra'
import base64
import json
from core.controllers import base
class FileReadHandler(base.BaseHandler):
"""Returns a base64-encoded ascii string with uploaded file's content."""
def post(self):
raw_file_content = self.request.get('file')
encoded_content = base64.b64encode(raw_file_content)
self.response.headers['Content-Type'] = 'application/json'
response = {
'base64_file_content': encoded_content,
}
self.response.out.write(json.dumps(response))
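# Illustrative sketch (not part of the original module): the handler above returns the
# uploaded bytes base64-encoded, so a client can recover the original content with a
# single b64decode. The helper name is hypothetical.
def _demo_roundtrip(raw_bytes=b'hello'):
    encoded = base64.b64encode(raw_bytes)
    return base64.b64decode(encoded) == raw_bytes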
| 31.578947
| 77
| 0.726667
| 161
| 1,200
| 5.341615
| 0.63354
| 0.069767
| 0.030233
| 0.037209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018443
| 0.186667
| 1,200
| 37
| 78
| 32.432432
| 0.862705
| 0.565833
| 0
| 0
| 0
| 0
| 0.134538
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ac9b8651f0cd02d3cb27eefe5c6577d55fc334a
| 4,080
|
py
|
Python
|
libs/configs/COCO/cfgs_res50_1x_coco_v3.py
|
lj-ecjtu/Cascade_FPN_Tensorflow-master
|
40fcd2c10f057b3f015ca1380d7db102e967391f
|
[
"MIT"
] | 43
|
2019-04-25T08:07:49.000Z
|
2021-08-24T08:33:37.000Z
|
libs/configs/COCO/cfgs_res50_1x_coco_v3.py
|
lj-ecjtu/Cascade_FPN_Tensorflow-master
|
40fcd2c10f057b3f015ca1380d7db102e967391f
|
[
"MIT"
] | 16
|
2019-05-11T03:51:19.000Z
|
2021-10-09T08:26:18.000Z
|
libs/configs/COCO/cfgs_res50_1x_coco_v3.py
|
lj-ecjtu/Cascade_FPN_Tensorflow-master
|
40fcd2c10f057b3f015ca1380d7db102e967391f
|
[
"MIT"
] | 15
|
2019-04-29T03:26:35.000Z
|
2020-05-26T05:35:39.000Z
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
'''
gluoncv backbone + multi_gpu
'''
# ------------------------------------------------
VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3'
NET_NAME = 'resnet50_v1d'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2,3,4,5,6,7"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 80000
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
INFERENCE_IMAGE_PATH = ROOT_PATH + '/tools/inference_image'
INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results'
if NET_NAME.startswith("resnet"):
weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
raise NotImplementedError
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
IS_FILTER_OUTSIDE_BOXES = False
FIXED_BLOCKS = 0 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
CUDA9 = True
EVAL_THRESHOLD = 0.5
RPN_LOCATION_LOSS_WEIGHT = 1.
RPN_CLASSIFICATION_LOSS_WEIGHT = 1.0
FAST_RCNN_LOCATION_LOSS_WEIGHT = 1.0
FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 1.0
RPN_SIGMA = 3.0
FASTRCNN_SIGMA = 1.0
MUTILPY_BIAS_GRADIENT = None # 2.0 # if None, will not multiply
GRADIENT_CLIPPING_BY_NORM = None # 10.0 if None, will not clip
EPSILON = 1e-5
MOMENTUM = 0.9
BATCH_SIZE = 1
WARM_SETP = int(0.25 * SAVE_WEIGHTS_INTE)
LR = 5e-4 * 2 * 1.25 * NUM_GPU * BATCH_SIZE
DECAY_STEP = [11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE] # 50000, 70000
MAX_ITERATION = 20*SAVE_WEIGHTS_INTE
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'coco' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 1333
CLASS_NUM = 80
# --------------------------------------------- Network_config
INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01)
BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001)
WEIGHT_DECAY = 0.00004 if NET_NAME.startswith('Mobilenet') else 0.0001
IS_ASSIGN = True
# ---------------------------------------------Anchor config
USE_CENTER_OFFSET = True
LEVLES = ['P2', 'P3', 'P4', 'P5', 'P6']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE_LIST = [4, 8, 16, 32, 64]
ANCHOR_SCALES = [1.0]
ANCHOR_RATIOS = [0.5, 1., 2.0]
ROI_SCALE_FACTORS = [[10., 10., 5.0, 5.0], [20., 20., 10.0, 10.0], [40., 40., 20.0, 20.0]]
ANCHOR_SCALE_FACTORS = [10., 10., 5.0, 5.0]
# --------------------------------------------FPN config
SHARE_HEADS = True
KERNEL_SIZE = 3
RPN_IOU_POSITIVE_THRESHOLD = 0.7
RPN_IOU_NEGATIVE_THRESHOLD = 0.3
TRAIN_RPN_CLOOBER_POSITIVES = False
RPN_MINIBATCH_SIZE = 256
RPN_POSITIVE_RATE = 0.5
RPN_NMS_IOU_THRESHOLD = 0.7
RPN_TOP_K_NMS_TRAIN = 12000
RPN_MAXIMUM_PROPOSAL_TARIN = 2000
RPN_TOP_K_NMS_TEST = 6000
RPN_MAXIMUM_PROPOSAL_TEST = 1000
# -------------------------------------------Fast-RCNN config
ROI_SIZE = 14
ROI_POOL_KERNEL_SIZE = 2
USE_DROPOUT = False
KEEP_PROB = 1.0
SHOW_SCORE_THRSHOLD = 0.6 # only show in tensorboard
FAST_RCNN_NMS_IOU_THRESHOLD = 0.5 # 0.6
FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100
FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5
FAST_RCNN_IOU_NEGATIVE_THRESHOLD = 0.0 # 0.1 < IOU < 0.5 is negative
FAST_RCNN_MINIBATCH_SIZE = 512 # if it is -1, train with OHEM
FAST_RCNN_POSITIVE_RATE = 0.25
ADD_GTBOXES_TO_TRAIN = False
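# Derived values (illustrative note, computed from the constants above): with
# GPU_GROUP = "0,1,2,3,4,5,6,7" (NUM_GPU = 8) and BATCH_SIZE = 1,
#   LR = 5e-4 * 2 * 1.25 * 8 * 1 = 0.01
#   WARM_SETP = int(0.25 * 80000) = 20000
#   DECAY_STEP = [880000, 1280000, 1600000]
#   MAX_ITERATION = 1600000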
| 31.384615
| 100
| 0.684069
| 635
| 4,080
| 4.061417
| 0.393701
| 0.027918
| 0.034897
| 0.019775
| 0.155487
| 0.099263
| 0.083753
| 0.083753
| 0.068244
| 0.068244
| 0
| 0.08352
| 0.12549
| 4,080
| 129
| 101
| 31.627907
| 0.639294
| 0.199265
| 0
| 0
| 0
| 0
| 0.094881
| 0.059613
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.032609
| 0
| 0.032609
| 0.032609
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0acab913205c6b28e2e38031c30bbb139185f389
| 3,055
|
py
|
Python
|
python/delta/tests/test_exceptions.py
|
vibhaska/delta
|
0e16356ff46520404e2376d048f002ca74f6dc0c
|
[
"Apache-2.0"
] | 1
|
2022-01-18T10:52:49.000Z
|
2022-01-18T10:52:49.000Z
|
python/delta/tests/test_exceptions.py
|
vibhaska/delta
|
0e16356ff46520404e2376d048f002ca74f6dc0c
|
[
"Apache-2.0"
] | null | null | null |
python/delta/tests/test_exceptions.py
|
vibhaska/delta
|
0e16356ff46520404e2376d048f002ca74f6dc0c
|
[
"Apache-2.0"
] | 1
|
2022-03-06T09:29:55.000Z
|
2022-03-06T09:29:55.000Z
|
#
# Copyright (2020) The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import delta.exceptions as exceptions
from delta.testing.utils import DeltaTestCase
class DeltaExceptionTests(DeltaTestCase):
def _raise_concurrent_exception(self, exception_type):
e = exception_type("")
self.spark.sparkContext._jvm.scala.util.Failure(e).get()
def test_capture_concurrent_write_exception(self):
e = self.spark._jvm.io.delta.exceptions.ConcurrentWriteException
self.assertRaises(exceptions.ConcurrentWriteException,
lambda: self._raise_concurrent_exception(e))
def test_capture_metadata_changed_exception(self):
e = self.spark._jvm.io.delta.exceptions.MetadataChangedException
self.assertRaises(exceptions.MetadataChangedException,
lambda: self._raise_concurrent_exception(e))
def test_capture_protocol_changed_exception(self):
e = self.spark._jvm.io.delta.exceptions.ProtocolChangedException
self.assertRaises(exceptions.ProtocolChangedException,
lambda: self._raise_concurrent_exception(e))
def test_capture_concurrent_append_exception(self):
e = self.spark._jvm.io.delta.exceptions.ConcurrentAppendException
self.assertRaises(exceptions.ConcurrentAppendException,
lambda: self._raise_concurrent_exception(e))
def test_capture_concurrent_delete_read_exception(self):
e = self.spark._jvm.io.delta.exceptions.ConcurrentDeleteReadException
self.assertRaises(exceptions.ConcurrentDeleteReadException,
lambda: self._raise_concurrent_exception(e))
def test_capture_concurrent_delete_delete_exception(self):
e = self.spark._jvm.io.delta.exceptions.ConcurrentDeleteDeleteException
self.assertRaises(exceptions.ConcurrentDeleteDeleteException,
lambda: self._raise_concurrent_exception(e))
def test_capture_concurrent_transaction_exception(self):
e = self.spark._jvm.io.delta.exceptions.ConcurrentTransactionException
self.assertRaises(exceptions.ConcurrentTransactionException,
lambda: self._raise_concurrent_exception(e))
if __name__ == "__main__":
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=4)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=4)
| 41.849315
| 87
| 0.734206
| 330
| 3,055
| 6.578788
| 0.369697
| 0.055274
| 0.088439
| 0.058038
| 0.32059
| 0.32059
| 0.304468
| 0.304468
| 0.304468
| 0.160295
| 0
| 0.00405
| 0.191817
| 3,055
| 72
| 88
| 42.430556
| 0.875253
| 0.18527
| 0
| 0.166667
| 0
| 0
| 0.010918
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.190476
| false
| 0
| 0.119048
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|