Dataset columns and dtypes:

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
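Each row pairs repository metadata (`max_stars_*`, `max_issues_*`, `max_forks_*`), the raw file `content`, and a set of `qsc_*` quality signals. As a minimal, hypothetical sketch of how rows with this schema might be inspected (the parquet filename, the interpretation of `cate_ast`, and the filter thresholds are assumptions, not part of the dataset):

```python
import pandas as pd

# Hypothetical export of the rows shown below; any parquet/JSONL dump of this schema works.
df = pd.read_parquet("code_rows.parquet")

# Example filter: keep Python files that appear to parse (cate_ast == 1, assumed meaning)
# and whose duplicated-5-gram fraction is not excessive. Thresholds are illustrative.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]])
```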
Row 1 metadata fields:

| field | value |
|---|---|
| hexsha | 76d15f9d93efb01c92547e696339173cf885a335 |
| size | 18,576 |
| ext | py |
| lang | Python |
| max_stars_repo_path | pp2_model.py |
| max_stars_repo_name | BetterManlinfeng/hyperclasspptwo |
| max_stars_repo_head_hexsha | 053e9cf8445911e285ac723bdfbceb1cb384ed2e |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | pp2_model.py |
| max_issues_repo_name | BetterManlinfeng/hyperclasspptwo |
| max_issues_repo_head_hexsha | 053e9cf8445911e285ac723bdfbceb1cb384ed2e |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | pp2_model.py |
| max_forks_repo_name | BetterManlinfeng/hyperclasspptwo |
| max_forks_repo_head_hexsha | 053e9cf8445911e285ac723bdfbceb1cb384ed2e |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

Row 1 content (source of `pp2_model.py`):
from tensorflow.keras import *
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential,regularizers
from tensorflow.keras.layers import Dropout
# from tensorflow.keras import *
# Define a 3x3 convolution. kernel_initializer options: 'he_normal', 'glorot_normal'
from tensorflow.python.keras.layers import Concatenate
def regularized_padded_conv(*args, **kwargs):
return layers.Conv2D(*args, **kwargs, padding='same', use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(5e-4))
############################### Channel attention ###############################
class ChannelAttention(layers.Layer):
def __init__(self, in_planes, ratio=8):
super(ChannelAttention, self).__init__()
self.avg= layers.GlobalAveragePooling2D()
self.max= layers.GlobalMaxPooling2D()
self.conv1 = layers.Conv2D(in_planes//ratio, kernel_size=1, strides=1, padding='same',
kernel_regularizer=regularizers.l2(5e-4),
use_bias=True, activation=tf.nn.relu)
self.conv2 = layers.Conv2D(in_planes, kernel_size=1, strides=1, padding='same',
kernel_regularizer=regularizers.l2(5e-4),
use_bias=True)
def call(self, inputs):
avg = self.avg(inputs)
max = self.max(inputs)
avg = layers.Reshape((1, 1, avg.shape[1]))(avg) # shape (None, 1, 1 feature)
max = layers.Reshape((1, 1, max.shape[1]))(max) # shape (None, 1, 1 feature)
avg_out = self.conv2(self.conv1(avg))
max_out = self.conv2(self.conv1(max))
out = avg_out + max_out
out = tf.nn.sigmoid(out)
return out
############################### Spatial attention ###############################
class SpatialAttention(layers.Layer):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
self.conv1 = regularized_padded_conv(1, kernel_size=kernel_size, strides=1, activation=tf.nn.sigmoid)
def call(self, inputs):
avg_out = tf.reduce_mean(inputs, axis=3)
max_out = tf.reduce_max(inputs, axis=3)
out = tf.stack([avg_out, max_out], axis=3)  # create a new channel axis and concatenate the two maps along it
out = self.conv1(out)
return out
class BasicBlock(layers.Layer):
def __init__(self, filter_num, stride=1):
super(BasicBlock, self).__init__()
# self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same', kernel_initializer='he_normal',kernel_regularizer=keras.regularizers.l2(5e-4))
self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same',kernel_regularizer=regularizers.l2(0.0001)) #kernel_initializer='he_normal',
self.bn1 = layers.BatchNormalization()
self.relu = layers.Activation('relu')
self.conv2 = layers.Conv2D(filter_num, (3, 3), strides=1, padding='same',kernel_regularizer=regularizers.l2(0.0001))
self.bn2 = layers.BatchNormalization()
############################### Attention ###############################
self.ca = ChannelAttention(filter_num)
self.sa = SpatialAttention()
if stride != 1:
self.downsample = Sequential()
self.downsample.add(layers.Conv2D(filter_num, (1, 1), strides=stride))
else:
self.downsample = lambda x:x
def call(self, inputs, training=None):
# [b, h, w, c]
out = self.conv1(inputs)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
############################### Attention ###############################
out = self.ca(out) * out
out = self.sa(out) * out
identity = self.downsample(inputs)
output = layers.add([out, identity])
output = tf.nn.relu(output)
return output
######################################
class build_resblock(keras.Model):
def __init__(self, filter_num, stride):
super(build_resblock, self).__init__()
self.BasicBlock1 = BasicBlock(filter_num, stride)
self.BasicBlock2 = BasicBlock(filter_num, stride=1)
def call(self,blocks):
res_blocks = Sequential()
res_blocks.add(self.BasicBlock1)
for _ in range(1, blocks):
res_blocks.add(self.BasicBlock2)
return res_blocks
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
######################################
class ResNet(keras.Model):
def __init__(self, layer_dims, num_classes=16): # [2, 2, 2, 2]
super(ResNet, self).__init__()
self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)),
layers.BatchNormalization(),
layers.Activation('relu'),
layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
])
self.layer1 = self.build_resblock(64, layer_dims[0])
self.layer2 = self.build_resblock(128, layer_dims[1], stride=1)
self.layer3 = self.build_resblock(256, layer_dims[2], stride=1)
self.layer4 = self.build_resblock(512, layer_dims[3], stride=1)
# output: [b, 512, h, w],
self.avgpool = layers.GlobalAveragePooling2D()
self.fc = layers.Dense(num_classes)
def call(self, inputs, training=None):
x = self.stem(inputs)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# [b, c]
x = self.avgpool(x)
# [b, 100]
x = self.fc(x)
return x
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
def resnet18():
return ResNet([2, 2, 2, 2],num_classes=9)
def resnet34():
return ResNet([3, 4, 6, 3],num_classes=9)
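# A minimal, hypothetical usage sketch for the factories above (the input shape
# below is an assumption, not a value taken from the repository):
#   model = resnet18()
#   logits = model(tf.zeros((1, 32, 32, 3)))   # -> shape (1, 9), since num_classes=9
#   model.summary()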
########################### pp2 main model ########################################
class pp2_model(keras.Model):
def __init__(self,filters_num,layer_dims,num_classes,dropout_rate):
super(pp2_model, self).__init__()
self.conv1 = layers.Conv3D(filters_num[0],kernel_size=(3,3,7),padding='same') # filters_num = 8
self.bn1 = layers.BatchNormalization()
self.relu1 = layers.Activation('relu')
self.conv2 = layers.Conv3D(filters_num[1],kernel_size=(3,3,5),padding='same') # filters_num = 16
self.bn2 = layers.BatchNormalization()
self.relu2 = layers.Activation('relu')
self.conv3 = layers.Conv3D(filters_num[2], kernel_size=(3, 3, 3), padding='same') # filters_num = 32
self.bn3 = layers.BatchNormalization()
self.relu3 = layers.Activation('relu')
# self.reshape = layers.Reshape()
self.conv4 = layers.Conv2D(filters_num[3], kernel_size=(3, 3), padding='same') # filters_num = 64
self.bn4 = layers.BatchNormalization()
self.relu4 = layers.Activation('relu')
self.conv5 = layers.Conv2D(filters_num[4], kernel_size=(3, 3), padding='same') # filters_num = **
self.bn5 = layers.BatchNormalization()
self.relu5 = layers.Activation('relu')
self.dpout = layers.Dropout(dropout_rate)
self.layer1 = self.build_resblock(filters_num[5], layer_dims[0]) # filters_num = 64
self.layer2 = self.build_resblock(filters_num[6], layer_dims[1], stride=2) # filters_num = 128
self.layer3 = self.build_resblock(filters_num[7], layer_dims[2], stride=2) # filters_num = 256
self.layer4 = self.build_resblock(filters_num[8], layer_dims[3], stride=2) # filters_num = 512
# output: [b, 512, h, w],
# self.fc1 = layers.Flatten()
self.avgpool = layers.GlobalAveragePooling2D()
self.fc2 = layers.Dense(filters_num[7],activation='relu')
self.fc3 = layers.Dense(filters_num[6],activation='relu')
self.fc4 = layers.Dense(num_classes)
def call(self,inputs,training=None):
out = self.conv1(inputs)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu2(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.relu3(out)
# reshape
out = layers.Reshape((out.shape[1],out.shape[2],out.shape[3] * out.shape[4]))(out)
out = self.conv4(out)
out = self.bn4(out)
out = self.relu4(out)
out = self.dpout(out)
out = self.conv5(out)
out = self.bn5(out)
out = self.dpout(out)
out = self.relu5(out)
x = self.layer1(out)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# [b, c]
x = self.avgpool(x)
# [b, 100]
x = self.fc2(x)
x = self.dpout(x)
x = self.fc3(x)
x = self.fc4(x)
return x
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
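# A minimal, hypothetical instantiation sketch for pp2_model; the filter list,
# layer_dims, patch shape and dropout rate are assumptions suggested by the
# inline comments above, not values taken from the repository.
#   model = pp2_model(filters_num=[8, 16, 32, 64, 64, 64, 128, 256, 512],
#                     layer_dims=[2, 2, 2, 2], num_classes=16, dropout_rate=0.4)
#   out = model(tf.zeros((1, 9, 9, 30, 1)))    # 5-D input for the Conv3D stem -> (1, 16)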
class ResNet_block(keras.Model):
def __init__(self, layer_dims,filters_num): # [2, 2, 2, 2]
super(ResNet_block, self).__init__()
#
# self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)),
# layers.BatchNormalization(),
# layers.Activation('relu'),
# layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
# ])
self.layer1 = self.build_resblock(filters_num[0], layer_dims[0]) # filters_num = 64
self.layer2 = self.build_resblock(filters_num[1], layer_dims[1], stride=1) # filters_num = 128
self.layer3 = self.build_resblock(filters_num[2], layer_dims[2], stride=1) # filters_num = 256
self.layer4 = self.build_resblock(filters_num[3], layer_dims[3], stride=1) # filters_num = 512
# output: [b, 512, h, w],
# self.avgpool = layers.GlobalAveragePooling2D()
# self.fc = layers.Dense(num_classes)
def call(self, inputs, training=None):
# x = self.stem(inputs)
x1 = self.layer1(inputs)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
# [b, c]
# x = self.avgpool(x)
# [b, 100]
# x = self.fc(x)
return x2,x4
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
def network_up(input_layer_up,filters_num,dropout_rate,Block_res):
# input_layer = Input(input_shape)
# conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 7), padding='same')(input_layer) # filters_num = 8
# conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 3),padding='same',kernel_initializer='he_normal',kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) # filters_num = 8
conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 3), padding='same',
kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) #kernel_initializer='he_normal',
# conv_layer1m = tf.keras.layers.MaxPooling3D(pool_size=(1, 1, 1),padding='same')(conv1)
# conv_layer1g = tf.keras.layers.GlobalMaxPooling3D()(conv1)
conv1_bn = layers.BatchNormalization()(conv1)
conv1_relu = layers.Activation('relu')(conv1_bn)
# conv1_relu = Dropout(0.5)(conv1_relu)
# conv1_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv1_relu)
# conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16
conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 3),padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv1_relu) # filters_num = 16
conv2_bn = layers.BatchNormalization()(conv2)
conv2_relu = layers.Activation('relu')(conv2_bn)
# conv2_relu = Dropout(0.5)(conv2_relu)
# conv2_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv2_relu)
conv3 = layers.Conv3D(filters_num[2], kernel_size=(3, 3, 3),padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv2_relu) # filters_num = 32
conv3_bn = layers.BatchNormalization()(conv3)
conv3_relu = layers.Activation('relu')(conv3_bn)
# conv3_relu = Dropout(0.5)(conv3_relu)
# conv3_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv3_relu)
conv3_relu_reshape = layers.Reshape((conv3_relu.shape[1],conv3_relu.shape[2],conv3_relu.shape[3]*conv3_relu.shape[4]))(conv3_relu)
conv3_relu_reshape = Dropout(0.5)(conv3_relu_reshape)
################## Second scale #########################
# conv11 = layers.Conv3D(filters_num[0], kernel_size=(5, 5, 3), padding='same',
# kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up)
# conv11_bn = layers.BatchNormalization()(conv11)
# conv11_relu = layers.Activation('relu')(conv11_bn)
#
# # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16
# conv22 = layers.Conv3D(filters_num[1], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv11_relu) # filters_num = 16
# conv22_bn = layers.BatchNormalization()(conv22)
# conv22_relu = layers.Activation('relu')(conv22_bn)
#
# conv33 = layers.Conv3D(filters_num[2], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv22_relu) # filters_num = 32
# conv33_bn = layers.BatchNormalization()(conv33)
# conv33_relu = layers.Activation('relu')(conv33_bn)
#
# conv33_relu_reshape = layers.Reshape(
# (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv33_relu)
####################################################
# conv111 = layers.Conv3D(filters_num[0], kernel_size=(7, 7, 3), padding='same',
# kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up)
# conv111_bn = layers.BatchNormalization()(conv111)
# conv111_relu = layers.Activation('relu')(conv111_bn)
#
# # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16
# conv222 = layers.Conv3D(filters_num[1], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv111_relu) # filters_num = 16
# conv222_bn = layers.BatchNormalization()(conv222)
# conv222_relu = layers.Activation('relu')(conv222_bn)
#
# conv333 = layers.Conv3D(filters_num[2], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv222_relu) # filters_num = 32
# conv333_bn = layers.BatchNormalization()(conv333)
# conv333_relu = layers.Activation('relu')(conv333_bn)
#
# conv333_relu_reshape = layers.Reshape(
# (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv333_relu)
#################concatenate########################
# conv33333_relu_reshape = Concatenate(axis=-1)([conv3_relu_reshape, conv33_relu_reshape])
#########################################
conv4 = layers.Conv2D(filters_num[3], kernel_size=(3, 3), padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv3_relu_reshape) # filters_num = 64
conv4_bn = layers.BatchNormalization()(conv4)
conv4_relu = layers.Activation('relu')(conv4_bn)
# conv4_relu = Dropout(0.5)(conv4_relu)
# conv4_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv4_relu)
# conv4_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv4_relu)
conv5 = layers.Conv2D(filters_num[4], kernel_size=(3, 3), padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv4_relu) # filters_num = **
conv5_bn = layers.BatchNormalization()(conv5)
conv5_relu = layers.Activation('relu')(conv5_bn)
# conv5_relu = Dropout(0.5)(conv5_relu)
# conv5_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv5_relu)
# conv5_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv5_relu)
# conv5_dpout = layers.Dropout(dropout_rate)(conv5)
# conv5_reshape = layers.Reshape((conv5_dpout.shape[1],conv5_dpout.shape[2],conv5_dpout.shape[3]))(conv5_dpout)
outputs2,outputs4 = Block_res(conv5_relu)
return conv5,outputs2,outputs4
# layer1 = build_resblock(filters_num[5], layer_dims[0]) # filters_num = 64
# layer2 = build_resblock(filters_num[6], layer_dims[1], stride=2) # filters_num = 128
# layer3 = build_resblock(filters_num[7], layer_dims[2], stride=2) # filters_num = 256
# layer4 = build_resblock(filters_num[8], layer_dims[3], stride=2) # filters_num = 512
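# A hypothetical sketch of wiring network_up with the Keras functional API; the
# input shape and filter lists are assumptions, not values from the repository.
#   inputs = keras.Input(shape=(9, 9, 30, 1))
#   block_res = ResNet_block(layer_dims=[2, 2, 2, 2], filters_num=[64, 128, 256, 512])
#   conv5, out2, out4 = network_up(inputs, [8, 16, 32, 64, 64], dropout_rate=0.5, Block_res=block_res)
#   model = keras.Model(inputs, out4)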
Row 1 remaining fields (line statistics and quality signals):

| field | value |
|---|---|
| avg_line_length | 39.02521 |
| max_line_length | 191 |
| alphanum_fraction | 0.596307 |
| qsc_code_num_words_quality_signal | 2,285 |
| qsc_code_num_chars_quality_signal | 18,576 |
| qsc_code_mean_word_length_quality_signal | 4.650766 |
| qsc_code_frac_words_unique_quality_signal | 0.0814 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.063988 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.035758 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.049591 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.620307 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.546532 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.52122 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.486873 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.465606 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.455914 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.057896 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.242194 |
| qsc_code_size_file_byte_quality_signal | 18,576 |
| qsc_code_num_lines_quality_signal | 475 |
| qsc_code_num_chars_line_max_quality_signal | 192 |
| qsc_code_num_chars_line_mean_quality_signal | 39.107368 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.697024 |
| qsc_code_frac_chars_comments_quality_signal | 0.317345 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.286364 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.011246 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.1 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.027273 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0.013636 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.227273 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |
| qsc_code_num_words | 0 |
| qsc_code_num_chars | 0 |
| qsc_code_mean_word_length | 0 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | 0 |
| qsc_code_frac_chars_top_3grams | 0 |
| qsc_code_frac_chars_top_4grams | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0 |
| qsc_code_frac_chars_whitespace | 0 |
| qsc_code_size_file_byte | 0 |
| qsc_code_num_lines | 0 |
| qsc_code_num_chars_line_max | 0 |
| qsc_code_num_chars_line_mean | 0 |
| qsc_code_frac_chars_alphabet | 0 |
| qsc_code_frac_chars_comments | 0 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 0 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0 |
| qsc_code_frac_chars_long_word_length | 0 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0 |
| qsc_codepython_cate_ast | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 |
| qsc_codepython_cate_var_zero | 0 |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0 |
| qsc_codepython_frac_lines_print | 0 |
| effective | 1 |
| hits | 0 |
Row 2 metadata fields:

| field | value |
|---|---|
| hexsha | 76d39eed393350171c588f61022e00d384bb01c9 |
| size | 53,515 |
| ext | py |
| lang | Python |
| max_stars_repo_path | third_party/google-endpoints/dogpile/cache/region.py |
| max_stars_repo_name | tingshao/catapult |
| max_stars_repo_head_hexsha | a8fe19e0c492472a8ed5710be9077e24cc517c5c |
| max_stars_repo_licenses | ["BSD-3-Clause"] |
| max_stars_count | 2,151 |
| max_stars_repo_stars_event_min_datetime | 2020-04-18T07:31:17.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-31T08:39:18.000Z |
| max_issues_repo_path | third_party/google-endpoints/dogpile/cache/region.py |
| max_issues_repo_name | tingshao/catapult |
| max_issues_repo_head_hexsha | a8fe19e0c492472a8ed5710be9077e24cc517c5c |
| max_issues_repo_licenses | ["BSD-3-Clause"] |
| max_issues_count | 4,640 |
| max_issues_repo_issues_event_min_datetime | 2015-07-08T16:19:08.000Z |
| max_issues_repo_issues_event_max_datetime | 2019-12-02T15:01:27.000Z |
| max_forks_repo_path | third_party/google-endpoints/dogpile/cache/region.py |
| max_forks_repo_name | tingshao/catapult |
| max_forks_repo_head_hexsha | a8fe19e0c492472a8ed5710be9077e24cc517c5c |
| max_forks_repo_licenses | ["BSD-3-Clause"] |
| max_forks_count | 698 |
| max_forks_repo_forks_event_min_datetime | 2015-06-02T19:18:35.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-03-29T16:57:15.000Z |

Row 2 content (source of `third_party/google-endpoints/dogpile/cache/region.py`):
from __future__ import with_statement
from .. import Lock, NeedRegenerationException
from ..util import NameRegistry
from . import exception
from ..util import PluginLoader, memoized_property, coerce_string_conf
from .util import function_key_generator, function_multi_key_generator
from .api import NO_VALUE, CachedValue
from .proxy import ProxyBackend
from ..util import compat
import time
import datetime
from numbers import Number
from functools import wraps
import threading
_backend_loader = PluginLoader("dogpile.cache")
register_backend = _backend_loader.register
from . import backends # noqa
value_version = 1
"""An integer placed in the :class:`.CachedValue`
so that new versions of dogpile.cache can detect cached
values from a previous, backwards-incompatible version.
"""
class RegionInvalidationStrategy(object):
"""Region invalidation strategy interface
Implement this interface and pass implementation instance
to :meth:`.CacheRegion.configure` to override default region invalidation.
Example::
class CustomInvalidationStrategy(RegionInvalidationStrategy):
def __init__(self):
self._soft_invalidated = None
self._hard_invalidated = None
def invalidate(self, hard=None):
if hard:
self._soft_invalidated = None
self._hard_invalidated = time.time()
else:
self._soft_invalidated = time.time()
self._hard_invalidated = None
def is_invalidated(self, timestamp):
return ((self._soft_invalidated and
timestamp < self._soft_invalidated) or
(self._hard_invalidated and
timestamp < self._hard_invalidated))
def was_hard_invalidated(self):
return bool(self._hard_invalidated)
def is_hard_invalidated(self, timestamp):
return (self._hard_invalidated and
timestamp < self._hard_invalidated)
def was_soft_invalidated(self):
return bool(self._soft_invalidated)
def is_soft_invalidated(self, timestamp):
return (self._soft_invalidated and
timestamp < self._soft_invalidated)
The custom implementation is injected into a :class:`.CacheRegion`
at configure time using the
:paramref:`.CacheRegion.configure.region_invalidator` parameter::
region = CacheRegion()
region = region.configure(region_invalidator=CustomInvalidationStrategy())
Invalidation strategies that wish to have access to the
:class:`.CacheRegion` itself should construct the invalidator given the
region as an argument::
class MyInvalidator(RegionInvalidationStrategy):
def __init__(self, region):
self.region = region
# ...
# ...
region = CacheRegion()
region = region.configure(region_invalidator=MyInvalidator(region))
.. versionadded:: 0.6.2
.. seealso::
:paramref:`.CacheRegion.configure.region_invalidator`
"""
def invalidate(self, hard=True):
"""Region invalidation.
:class:`.CacheRegion` propagated call.
The default invalidation system works by setting
a current timestamp (using ``time.time()``) to consider all older
timestamps effectively invalidated.
"""
raise NotImplementedError()
def is_hard_invalidated(self, timestamp):
"""Check timestamp to determine if it was hard invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time and region is invalidated
in hard mode.
"""
raise NotImplementedError()
def is_soft_invalidated(self, timestamp):
"""Check timestamp to determine if it was soft invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time and region is invalidated
in soft mode.
"""
raise NotImplementedError()
def is_invalidated(self, timestamp):
"""Check timestamp to determine if it was invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time.
"""
raise NotImplementedError()
def was_soft_invalidated(self):
"""Indicate the region was invalidated in soft mode.
:return: Boolean. True if region was invalidated in soft mode.
"""
raise NotImplementedError()
def was_hard_invalidated(self):
"""Indicate the region was invalidated in hard mode.
:return: Boolean. True if region was invalidated in hard mode.
"""
raise NotImplementedError()
class DefaultInvalidationStrategy(RegionInvalidationStrategy):
def __init__(self):
self._is_hard_invalidated = None
self._invalidated = None
def invalidate(self, hard=True):
self._is_hard_invalidated = bool(hard)
self._invalidated = time.time()
def is_invalidated(self, timestamp):
return (self._invalidated is not None and
timestamp < self._invalidated)
def was_hard_invalidated(self):
return self._is_hard_invalidated is True
def is_hard_invalidated(self, timestamp):
return self.was_hard_invalidated() and self.is_invalidated(timestamp)
def was_soft_invalidated(self):
return self._is_hard_invalidated is False
def is_soft_invalidated(self, timestamp):
return self.was_soft_invalidated() and self.is_invalidated(timestamp)
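# A minimal sketch of the default strategy's semantics (illustrative only):
#   strategy = DefaultInvalidationStrategy()
#   strategy.invalidate(hard=False)            # soft invalidation at the current time
#   strategy.is_invalidated(time.time() - 1)   # True: older timestamps are now stale
#   strategy.was_soft_invalidated()            # True; was_hard_invalidated() is False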
class CacheRegion(object):
"""A front end to a particular cache backend.
:param name: Optional, a string name for the region.
This isn't used internally
but can be accessed via the ``.name`` parameter, helpful
for configuring a region from a config file.
:param function_key_generator: Optional. A
function that will produce a "cache key" given
a data creation function and arguments, when using
the :meth:`.CacheRegion.cache_on_arguments` method.
The structure of this function
should be two levels: given the data creation function,
return a new function that generates the key based on
the given arguments. Such as::
def my_key_generator(namespace, fn, **kw):
fname = fn.__name__
def generate_key(*arg):
return namespace + "_" + fname + "_".join(str(s) for s in arg)
return generate_key
region = make_region(
function_key_generator = my_key_generator
).configure(
"dogpile.cache.dbm",
expiration_time=300,
arguments={
"filename":"file.dbm"
}
)
The ``namespace`` is that passed to
:meth:`.CacheRegion.cache_on_arguments`. It's not consulted
outside this function, so in fact can be of any form.
For example, it can be passed as a tuple, used to specify
arguments to pluck from \**kw::
def my_key_generator(namespace, fn):
def generate_key(*arg, **kw):
return ":".join(
[kw[k] for k in namespace] +
[str(x) for x in arg]
)
return generate_key
Where the decorator might be used as::
@my_region.cache_on_arguments(namespace=('x', 'y'))
def my_function(a, b, **kw):
return my_data()
.. seealso::
:func:`.function_key_generator` - default key generator
:func:`.kwarg_function_key_generator` - optional gen that also
uses keyword arguments
:param function_multi_key_generator: Optional.
Similar to ``function_key_generator`` parameter, but it's used in
:meth:`.CacheRegion.cache_multi_on_arguments`. Generated function
should return list of keys. For example::
def my_multi_key_generator(namespace, fn, **kw):
namespace = fn.__name__ + (namespace or '')
def generate_keys(*args):
return [namespace + ':' + str(a) for a in args]
return generate_keys
:param key_mangler: Function which will be used on all incoming
keys before passing to the backend. Defaults to ``None``,
in which case the key mangling function recommended by
the cache backend will be used. A typical mangler
is the SHA1 mangler found at :func:`.sha1_mangle_key`
which coerces keys into a SHA1
hash, so that the string length is fixed. To
disable all key mangling, set to ``False``. Another typical
mangler is the built-in Python function ``str``, which can be used
to convert non-string or Unicode keys to bytestrings, which is
needed when using a backend such as bsddb or dbm under Python 2.x
in conjunction with Unicode keys.
:param async_creation_runner: A callable that, when specified,
will be passed to and called by dogpile.lock when
there is a stale value present in the cache. It will be passed the
mutex and is responsible for releasing that mutex when finished.
This can be used to defer the computation of expensive creator
functions to later points in the future by way of, for example, a
background thread, a long-running queue, or a task manager system
like Celery.
For a specific example using async_creation_runner, new values can
be created in a background thread like so::
import threading
def async_creation_runner(cache, somekey, creator, mutex):
''' Used by dogpile.core:Lock when appropriate '''
def runner():
try:
value = creator()
cache.set(somekey, value)
finally:
mutex.release()
thread = threading.Thread(target=runner)
thread.start()
region = make_region(
async_creation_runner=async_creation_runner,
).configure(
'dogpile.cache.memcached',
expiration_time=5,
arguments={
'url': '127.0.0.1:11211',
'distributed_lock': True,
}
)
Remember that the first request for a key with no associated
value will always block; async_creator will not be invoked.
However, subsequent requests for cached-but-expired values will
still return promptly. They will be refreshed by whatever
asynchronous means the provided async_creation_runner callable
implements.
By default the async_creation_runner is disabled and is set
to ``None``.
.. versionadded:: 0.4.2 added the async_creation_runner
feature.
"""
def __init__(
self,
name=None,
function_key_generator=function_key_generator,
function_multi_key_generator=function_multi_key_generator,
key_mangler=None,
async_creation_runner=None,
):
"""Construct a new :class:`.CacheRegion`."""
self.name = name
self.function_key_generator = function_key_generator
self.function_multi_key_generator = function_multi_key_generator
self.key_mangler = self._user_defined_key_mangler = key_mangler
self.async_creation_runner = async_creation_runner
self.region_invalidator = DefaultInvalidationStrategy()
def configure(
self, backend,
expiration_time=None,
arguments=None,
_config_argument_dict=None,
_config_prefix=None,
wrap=None,
replace_existing_backend=False,
region_invalidator=None
):
"""Configure a :class:`.CacheRegion`.
The :class:`.CacheRegion` itself
is returned.
:param backend: Required. This is the name of the
:class:`.CacheBackend` to use, and is resolved by loading
the class from the ``dogpile.cache`` entrypoint.
:param expiration_time: Optional. The expiration time passed
to the dogpile system. May be passed as an integer number
of seconds, or as a ``datetime.timedelta`` value.
.. versionadded:: 0.5.0
``expiration_time`` may be optionally passed as a
``datetime.timedelta`` value.
The :meth:`.CacheRegion.get_or_create`
method as well as the :meth:`.CacheRegion.cache_on_arguments`
decorator (though note: **not** the :meth:`.CacheRegion.get`
method) will call upon the value creation function after this
time period has passed since the last generation.
:param arguments: Optional. The structure here is passed
directly to the constructor of the :class:`.CacheBackend`
in use, though is typically a dictionary.
:param wrap: Optional. A list of :class:`.ProxyBackend`
classes and/or instances, each of which will be applied
in a chain to ultimately wrap the original backend,
so that custom functionality augmentation can be applied.
.. versionadded:: 0.5.0
.. seealso::
:ref:`changing_backend_behavior`
:param replace_existing_backend: if True, the existing cache backend
will be replaced. Without this flag, an exception is raised if
a backend is already configured.
.. versionadded:: 0.5.7
:param region_invalidator: Optional. Override default invalidation
strategy with custom implementation of
:class:`.RegionInvalidationStrategy`.
.. versionadded:: 0.6.2
"""
if "backend" in self.__dict__ and not replace_existing_backend:
raise exception.RegionAlreadyConfigured(
"This region is already "
"configured with backend: %s. "
"Specify replace_existing_backend=True to replace."
% self.backend)
backend_cls = _backend_loader.load(backend)
if _config_argument_dict:
self.backend = backend_cls.from_config_dict(
_config_argument_dict,
_config_prefix
)
else:
self.backend = backend_cls(arguments or {})
if not expiration_time or isinstance(expiration_time, Number):
self.expiration_time = expiration_time
elif isinstance(expiration_time, datetime.timedelta):
self.expiration_time = int(
compat.timedelta_total_seconds(expiration_time))
else:
raise exception.ValidationError(
'expiration_time is not a number or timedelta.')
if not self._user_defined_key_mangler:
self.key_mangler = self.backend.key_mangler
self._lock_registry = NameRegistry(self._create_mutex)
if getattr(wrap, '__iter__', False):
for wrapper in reversed(wrap):
self.wrap(wrapper)
if region_invalidator:
self.region_invalidator = region_invalidator
return self
def wrap(self, proxy):
''' Takes a ProxyBackend instance or class and wraps the
attached backend. '''
# if we were passed a type rather than an instance then
# initialize it.
if type(proxy) == type:
proxy = proxy()
if not issubclass(type(proxy), ProxyBackend):
raise TypeError("Type %s is not a valid ProxyBackend"
% type(proxy))
self.backend = proxy.wrap(self.backend)
def _mutex(self, key):
return self._lock_registry.get(key)
class _LockWrapper(object):
"""weakref-capable wrapper for threading.Lock"""
def __init__(self):
self.lock = threading.Lock()
def acquire(self, wait=True):
return self.lock.acquire(wait)
def release(self):
self.lock.release()
def _create_mutex(self, key):
mutex = self.backend.get_mutex(key)
if mutex is not None:
return mutex
else:
return self._LockWrapper()
def invalidate(self, hard=True):
"""Invalidate this :class:`.CacheRegion`.
The default invalidation system works by setting
a current timestamp (using ``time.time()``)
representing the "minimum creation time" for
a value. Any retrieved value whose creation
time is prior to this timestamp
is considered to be stale. It does not
affect the data in the cache in any way, and is also
local to this instance of :class:`.CacheRegion`.
Once set, the invalidation time is honored by
the :meth:`.CacheRegion.get_or_create`,
:meth:`.CacheRegion.get_or_create_multi` and
:meth:`.CacheRegion.get` methods.
The method supports both "hard" and "soft" invalidation
options. With "hard" invalidation,
:meth:`.CacheRegion.get_or_create` will force an immediate
regeneration of the value which all getters will wait for.
With "soft" invalidation, subsequent getters will return the
"old" value until the new one is available.
Usage of "soft" invalidation requires that the region or the method
is given a non-None expiration time.
.. versionadded:: 0.3.0
:param hard: if True, cache values will all require immediate
regeneration; dogpile logic won't be used. If False, the
creation time of existing values will be pushed back before
the expiration time so that a return+regen will be invoked.
.. versionadded:: 0.5.1
"""
self.region_invalidator.invalidate(hard)
def configure_from_config(self, config_dict, prefix):
"""Configure from a configuration dictionary
and a prefix.
Example::
local_region = make_region()
memcached_region = make_region()
# regions are ready to use for function
# decorators, but not yet for actual caching
# later, when config is available
myconfig = {
"cache.local.backend":"dogpile.cache.dbm",
"cache.local.arguments.filename":"/path/to/dbmfile.dbm",
"cache.memcached.backend":"dogpile.cache.pylibmc",
"cache.memcached.arguments.url":"127.0.0.1, 10.0.0.1",
}
local_region.configure_from_config(myconfig, "cache.local.")
memcached_region.configure_from_config(myconfig,
"cache.memcached.")
"""
config_dict = coerce_string_conf(config_dict)
return self.configure(
config_dict["%sbackend" % prefix],
expiration_time=config_dict.get(
"%sexpiration_time" % prefix, None),
_config_argument_dict=config_dict,
_config_prefix="%sarguments." % prefix,
wrap=config_dict.get(
"%swrap" % prefix, None),
)
@memoized_property
def backend(self):
raise exception.RegionNotConfigured(
"No backend is configured on this region.")
@property
def is_configured(self):
"""Return True if the backend has been configured via the
:meth:`.CacheRegion.configure` method already.
.. versionadded:: 0.5.1
"""
return 'backend' in self.__dict__
def get(self, key, expiration_time=None, ignore_expiration=False):
"""Return a value from the cache, based on the given key.
If the value is not present, the method returns the token
``NO_VALUE``. ``NO_VALUE`` evaluates to False, but is distinct from
``None`` so that a cached value of ``None`` can be distinguished from a cache miss.
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
.. versionchanged:: 0.3.0
:meth:`.CacheRegion.get` now checks the value's creation time
against the expiration time, rather than returning
the value unconditionally.
The method also interprets the cached value in terms
of the current "invalidation" time as set by
the :meth:`.invalidate` method. If a value is present,
but its creation time is older than the current
invalidation time, the ``NO_VALUE`` token is returned.
Passing the flag ``ignore_expiration=True`` bypasses
the invalidation time check.
.. versionadded:: 0.3.0
Support for the :meth:`.CacheRegion.invalidate`
method.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
before being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param expiration_time: Optional expiration time value
which will supersede that configured on the :class:`.CacheRegion`
itself.
.. versionadded:: 0.3.0
:param ignore_expiration: if ``True``, the value is returned
from the cache if present, regardless of configured
expiration times or whether or not :meth:`.invalidate`
was called.
.. versionadded:: 0.3.0
"""
if self.key_mangler:
key = self.key_mangler(key)
value = self.backend.get(key)
value = self._unexpired_value_fn(
expiration_time, ignore_expiration)(value)
return value.payload
def _unexpired_value_fn(self, expiration_time, ignore_expiration):
if ignore_expiration:
return lambda value: value
else:
if expiration_time is None:
expiration_time = self.expiration_time
current_time = time.time()
def value_fn(value):
if value is NO_VALUE:
return value
elif expiration_time is not None and \
current_time - value.metadata["ct"] > expiration_time:
return NO_VALUE
elif self.region_invalidator.is_invalidated(
value.metadata["ct"]):
return NO_VALUE
else:
return value
return value_fn
def get_multi(self, keys, expiration_time=None, ignore_expiration=False):
"""Return multiple values from the cache, based on the given keys.
Returns values as a list matching the keys given.
E.g.::
values = region.get_multi(["one", "two", "three"])
To convert values to a dictionary, use ``zip()``::
keys = ["one", "two", "three"]
values = region.get_multi(keys)
dictionary = dict(zip(keys, values))
Keys which aren't present in the list are returned as
the ``NO_VALUE`` token. ``NO_VALUE`` evaluates to False,
but is distinct from ``None`` so that a cached value of
``None`` can be distinguished from a missing key.
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
.. versionadded:: 0.5.0
"""
if not keys:
return []
if self.key_mangler:
keys = list(map(lambda key: self.key_mangler(key), keys))
backend_values = self.backend.get_multi(keys)
_unexpired_value_fn = self._unexpired_value_fn(
expiration_time, ignore_expiration)
return [
value.payload if value is not NO_VALUE else value
for value in
(
_unexpired_value_fn(value) for value in
backend_values
)
]
def get_or_create(
self, key, creator, expiration_time=None, should_cache_fn=None):
"""Return a cached value based on the given key.
If the value does not exist or is considered to be expired
based on its creation time, the given
creation function may or may not be used to recreate the value
and persist the newly generated value in the cache.
Whether or not the function is used depends on if the
*dogpile lock* can be acquired or not. If it can't, it means
a different thread or process is already running a creation
function for this key against the cache. When the dogpile
lock cannot be acquired, the method will block if no
previous value is available, until the lock is released and
a new value is available. If a previous value
is available, that value is returned immediately without blocking.
If the :meth:`.invalidate` method has been called, and
the retrieved value's timestamp is older than the invalidation
timestamp, the value is unconditionally prevented from
being returned. The method will attempt to acquire the dogpile
lock to generate a new value, or will wait
until the lock is released to return the new value.
.. versionchanged:: 0.3.0
The value is unconditionally regenerated if the creation
time is older than the last call to :meth:`.invalidate`.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
before being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param creator: function which creates a new value.
:param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
:param should_cache_fn: optional callable function which will receive
the value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
E.g.::
def dont_cache_none(value):
return value is not None
value = region.get_or_create("some key",
create_value,
should_cache_fn=dont_cache_none)
Above, the function returns the value of create_value() if
the cache is invalid, however if the return value is None,
it won't be cached.
.. versionadded:: 0.4.3
.. seealso::
:meth:`.CacheRegion.cache_on_arguments` - applies
:meth:`.get_or_create` to any function using a decorator.
:meth:`.CacheRegion.get_or_create_multi` - multiple key/value
version
"""
orig_key = key
if self.key_mangler:
key = self.key_mangler(key)
def get_value():
value = self.backend.get(key)
if (value is NO_VALUE or value.metadata['v'] != value_version or
self.region_invalidator.is_hard_invalidated(
value.metadata["ct"])):
raise NeedRegenerationException()
ct = value.metadata["ct"]
if self.region_invalidator.is_soft_invalidated(ct):
ct = time.time() - expiration_time - .0001
return value.payload, ct
def gen_value():
created_value = creator()
value = self._value(created_value)
if not should_cache_fn or \
should_cache_fn(created_value):
self.backend.set(key, value)
return value.payload, value.metadata["ct"]
if expiration_time is None:
expiration_time = self.expiration_time
if (expiration_time is None and
self.region_invalidator.was_soft_invalidated()):
raise exception.DogpileCacheException(
"Non-None expiration time required "
"for soft invalidation")
if expiration_time == -1:
expiration_time = None
if self.async_creation_runner:
def async_creator(mutex):
return self.async_creation_runner(
self, orig_key, creator, mutex)
else:
async_creator = None
with Lock(
self._mutex(key),
gen_value,
get_value,
expiration_time,
async_creator) as value:
return value
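# A short, hypothetical usage sketch for get_or_create(); the key, creator and
# expiration values below are illustrative, not part of this module:
#   def create_value():
#       return expensive_lookup()              # assumed expensive data-creation function
#   value = region.get_or_create("some:key", create_value, expiration_time=3600)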
def get_or_create_multi(
self, keys, creator, expiration_time=None, should_cache_fn=None):
"""Return a sequence of cached values based on a sequence of keys.
The behavior for generation of values based on keys corresponds
to that of :meth:`.Region.get_or_create`, with the exception that
the ``creator()`` function may be asked to generate any subset of
the given keys. The list of keys to be generated is passed to
``creator()``, and ``creator()`` should return the generated values
as a sequence corresponding to the order of the keys.
The method uses the same approach as :meth:`.Region.get_multi`
and :meth:`.Region.set_multi` to get and set values from the
backend.
If you are using a :class:`.CacheBackend` or :class:`.ProxyBackend`
that modifies values, take note this function invokes
``.set_multi()`` for newly generated values using the same values it
returns to the calling function. A correct implementation of
``.set_multi()`` will not modify values in-place on the submitted
``mapping`` dict.
:param keys: Sequence of keys to be retrieved.
:param creator: function which accepts a sequence of keys and
returns a sequence of new values.
:param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
:param should_cache_fn: optional callable function which will receive
each value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
.. versionadded:: 0.5.0
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
"""
def get_value(key):
value = values.get(key, NO_VALUE)
if (value is NO_VALUE or value.metadata['v'] != value_version or
self.region_invalidator.is_hard_invalidated(
value.metadata["ct"])):
# dogpile.core understands a 0 here as
# "the value is not available", e.g.
# _has_value() will return False.
return value.payload, 0
else:
ct = value.metadata["ct"]
if self.region_invalidator.is_soft_invalidated(ct):
ct = time.time() - expiration_time - .0001
return value.payload, ct
def gen_value():
raise NotImplementedError()
def async_creator(key, mutex):
mutexes[key] = mutex
if expiration_time is None:
expiration_time = self.expiration_time
if (expiration_time is None and
self.region_invalidator.was_soft_invalidated()):
raise exception.DogpileCacheException(
"Non-None expiration time required "
"for soft invalidation")
if expiration_time == -1:
expiration_time = None
mutexes = {}
sorted_unique_keys = sorted(set(keys))
if self.key_mangler:
mangled_keys = [self.key_mangler(k) for k in sorted_unique_keys]
else:
mangled_keys = sorted_unique_keys
orig_to_mangled = dict(zip(sorted_unique_keys, mangled_keys))
values = dict(zip(mangled_keys, self.backend.get_multi(mangled_keys)))
for orig_key, mangled_key in orig_to_mangled.items():
with Lock(
self._mutex(mangled_key),
gen_value,
lambda: get_value(mangled_key),
expiration_time,
async_creator=lambda mutex: async_creator(orig_key, mutex)
):
pass
try:
if mutexes:
# sort the keys, the idea is to prevent deadlocks.
# though haven't been able to simulate one anyway.
keys_to_get = sorted(mutexes)
new_values = creator(*keys_to_get)
values_w_created = dict(
(orig_to_mangled[k], self._value(v))
for k, v in zip(keys_to_get, new_values)
)
if not should_cache_fn:
self.backend.set_multi(values_w_created)
else:
self.backend.set_multi(dict(
(k, v)
for k, v in values_w_created.items()
if should_cache_fn(v[0])
))
values.update(values_w_created)
return [values[orig_to_mangled[k]].payload for k in keys]
finally:
for mutex in mutexes.values():
mutex.release()
def _value(self, value):
"""Return a :class:`.CachedValue` given a value."""
return CachedValue(
value,
{
"ct": time.time(),
"v": value_version
})
def set(self, key, value):
"""Place a new value in the cache under the given key."""
if self.key_mangler:
key = self.key_mangler(key)
self.backend.set(key, self._value(value))
def set_multi(self, mapping):
"""Place new values in the cache under the given keys.
.. versionadded:: 0.5.0
"""
if not mapping:
return
if self.key_mangler:
mapping = dict((
self.key_mangler(k), self._value(v))
for k, v in mapping.items())
else:
mapping = dict((k, self._value(v)) for k, v in mapping.items())
self.backend.set_multi(mapping)
def delete(self, key):
"""Remove a value from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
"""
if self.key_mangler:
key = self.key_mangler(key)
self.backend.delete(key)
def delete_multi(self, keys):
"""Remove multiple values from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
.. versionadded:: 0.5.0
"""
if self.key_mangler:
keys = list(map(lambda key: self.key_mangler(key), keys))
self.backend.delete_multi(keys)
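# Illustrative round-trip through the basic key/value API defined above,
# assuming `region` is an already-configured CacheRegion:
#   region.set("some_key", "some_value")
#   region.get("some_key")                     # -> "some_value"
#   region.delete("some_key")
#   region.get("some_key")                     # -> the NO_VALUE token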
def cache_on_arguments(
self, namespace=None,
expiration_time=None,
should_cache_fn=None,
to_str=compat.string_type,
function_key_generator=None):
"""A function decorator that will cache the return
value of the function using a key derived from the
function itself and its arguments.
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
E.g.::
@someregion.cache_on_arguments()
def generate_something(x, y):
return somedatabase.query(x, y)
The decorated function can then be called normally, where
data will be pulled from the cache region unless a new
value is needed::
result = generate_something(5, 6)
The function is also given an attribute ``invalidate()``, which
provides for invalidation of the value. Pass to ``invalidate()``
the same arguments you'd pass to the function itself to represent
a particular value::
generate_something.invalidate(5, 6)
Another attribute ``set()`` is added to provide extra caching
possibilities relative to the function. This is a convenience
method for :meth:`.CacheRegion.set` which will store a given
value directly without calling the decorated function.
The value to be cached is passed as the first argument, and the
arguments which would normally be passed to the function
should follow::
generate_something.set(3, 5, 6)
The above example is equivalent to calling
``generate_something(5, 6)``, if the function were to produce
the value ``3`` as the value to be cached.
.. versionadded:: 0.4.1 Added ``set()`` method to decorated function.
Similar to ``set()`` is ``refresh()``. This attribute will
invoke the decorated function and populate a new value into
the cache with the new value, as well as returning that value::
newvalue = generate_something.refresh(5, 6)
.. versionadded:: 0.5.0 Added ``refresh()`` method to decorated
function.
Lastly, the ``get()`` method returns either the value cached
for the given key, or the token ``NO_VALUE`` if no such key
exists::
value = generate_something.get(5, 6)
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
The default key generation will use the name
of the function, the module name for the function,
the arguments passed, as well as an optional "namespace"
parameter in order to generate a cache key.
Given a function ``one`` inside the module
``myapp.tools``::
@region.cache_on_arguments(namespace="foo")
def one(a, b):
return a + b
Above, calling ``one(3, 4)`` will produce a
cache key as follows::
myapp.tools:one|foo|3 4
The key generator will ignore an initial argument
of ``self`` or ``cls``, making the decorator suitable
(with caveats) for use with instance or class methods.
Given the example::
class MyClass(object):
@region.cache_on_arguments(namespace="foo")
def one(self, a, b):
return a + b
The cache key above for ``MyClass().one(3, 4)`` will
again produce the same cache key of ``myapp.tools:one|foo|3 4`` -
the name ``self`` is skipped.
The ``namespace`` parameter is optional, and is used
normally to disambiguate two functions of the same
name within the same module, as can occur when decorating
instance or class methods as below::
class MyClass(object):
@region.cache_on_arguments(namespace='MC')
def somemethod(self, x, y):
""
class MyOtherClass(object):
@region.cache_on_arguments(namespace='MOC')
def somemethod(self, x, y):
""
Above, the ``namespace`` parameter disambiguates
between ``somemethod`` on ``MyClass`` and ``MyOtherClass``.
Python class declaration mechanics otherwise prevent
the decorator from having awareness of the ``MyClass``
and ``MyOtherClass`` names, as the function is received
by the decorator before it becomes an instance method.
The function key generation can be entirely replaced
on a per-region basis using the ``function_key_generator``
argument present on :func:`.make_region` and
:class:`.CacheRegion`. It defaults to
:func:`.function_key_generator`.
:param namespace: optional string argument which will be
established as part of the cache key. This may be needed
to disambiguate functions of the same name within the same
source file, such as those
associated with classes - note that the decorator itself
can't see the parent class on a function as the class is
being declared.
:param expiration_time: if not None, will override the normal
expiration time.
May be specified as a callable, taking no arguments, that
returns a value to be used as the ``expiration_time``. This callable
will be called whenever the decorated function itself is called, in
caching or retrieving. Thus, this can be used to
determine a *dynamic* expiration time for the cached function
result. Example use cases include "cache the result until the
end of the day, week or time period" and "cache until a certain date
or time passes".
.. versionchanged:: 0.5.0
``expiration_time`` may be passed as a callable to
:meth:`.CacheRegion.cache_on_arguments`.
:param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create`.
.. versionadded:: 0.4.3
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
:param function_key_generator: a function that will produce a
"cache key". This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
"""
expiration_time_is_callable = compat.callable(expiration_time)
if function_key_generator is None:
function_key_generator = self.function_key_generator
def decorator(fn):
if to_str is compat.string_type:
# backwards compatible
key_generator = function_key_generator(namespace, fn)
else:
key_generator = function_key_generator(
namespace, fn,
to_str=to_str)
@wraps(fn)
def decorate(*arg, **kw):
key = key_generator(*arg, **kw)
@wraps(fn)
def creator():
return fn(*arg, **kw)
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
return self.get_or_create(key, creator, timeout,
should_cache_fn)
def invalidate(*arg, **kw):
key = key_generator(*arg, **kw)
self.delete(key)
def set_(value, *arg, **kw):
key = key_generator(*arg, **kw)
self.set(key, value)
def get(*arg, **kw):
key = key_generator(*arg, **kw)
return self.get(key)
def refresh(*arg, **kw):
key = key_generator(*arg, **kw)
value = fn(*arg, **kw)
self.set(key, value)
return value
decorate.set = set_
decorate.invalidate = invalidate
decorate.refresh = refresh
decorate.get = get
decorate.original = fn
return decorate
return decorator
def cache_multi_on_arguments(
self, namespace=None, expiration_time=None,
should_cache_fn=None,
asdict=False, to_str=compat.string_type,
function_multi_key_generator=None):
"""A function decorator that will cache multiple return
values from the function using a sequence of keys derived from the
function itself and the arguments passed to it.
This method is the "multiple key" analogue to the
:meth:`.CacheRegion.cache_on_arguments` method.
Example::
@someregion.cache_multi_on_arguments()
def generate_something(*keys):
return [
somedatabase.query(key)
for key in keys
]
The decorated function can be called normally. The decorator
will produce a list of cache keys using a mechanism similar to
that of :meth:`.CacheRegion.cache_on_arguments`, combining the
name of the function with the optional namespace and with the
string form of each key. It will then consult the cache using
the same mechanism as that of :meth:`.CacheRegion.get_multi`
to retrieve all current values; the originally passed keys
corresponding to those values which aren't generated or need
regeneration will be assembled into a new argument list, and
the decorated function is then called with that subset of
arguments.
The returned result is a list::
result = generate_something("key1", "key2", "key3")
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create_multi` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
Unlike the :meth:`.CacheRegion.cache_on_arguments` method,
:meth:`.CacheRegion.cache_multi_on_arguments` works only with
a single function signature, one which takes a simple list of
keys as arguments.
Like :meth:`.CacheRegion.cache_on_arguments`, the decorated function
is also provided with a ``set()`` method, which here accepts a
mapping of keys and values to set in the cache::
generate_something.set({"k1": "value1",
"k2": "value2", "k3": "value3"})
...an ``invalidate()`` method, which has the effect of deleting
the given sequence of keys using the same mechanism as that of
:meth:`.CacheRegion.delete_multi`::
generate_something.invalidate("k1", "k2", "k3")
...a ``refresh()`` method, which will call the creation
function, cache the new values, and return them::
values = generate_something.refresh("k1", "k2", "k3")
...and a ``get()`` method, which will return values
based on the given arguments::
values = generate_something.get("k1", "k2", "k3")
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
Parameters passed to :meth:`.CacheRegion.cache_multi_on_arguments`
have the same meaning as those passed to
:meth:`.CacheRegion.cache_on_arguments`.
:param namespace: optional string argument which will be
established as part of each cache key.
:param expiration_time: if not None, will override the normal
expiration time. May be passed as an integer or a
callable.
:param should_cache_fn: passed to
:meth:`.CacheRegion.get_or_create_multi`. This function is given a
value as returned by the creator, and only if it returns True will
that value be placed in the cache.
:param asdict: if ``True``, the decorated function should return
its result as a dictionary of keys->values, and the final result
of calling the decorated function will also be a dictionary.
If left at its default value of ``False``, the decorated function
should return its result as a list of values, and the final
result of calling the decorated function will also be a list.
         When ``asdict==True``, if the dictionary returned by the decorated
         function is missing keys, those keys will not be cached.
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
:param function_multi_key_generator: a function that will produce a
list of keys. This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
.. seealso::
:meth:`.CacheRegion.cache_on_arguments`
:meth:`.CacheRegion.get_or_create_multi`
"""
expiration_time_is_callable = compat.callable(expiration_time)
if function_multi_key_generator is None:
function_multi_key_generator = self.function_multi_key_generator
def decorator(fn):
key_generator = function_multi_key_generator(
namespace, fn,
to_str=to_str)
@wraps(fn)
def decorate(*arg, **kw):
cache_keys = arg
keys = key_generator(*arg, **kw)
key_lookup = dict(zip(keys, cache_keys))
@wraps(fn)
def creator(*keys_to_create):
return fn(*[key_lookup[k] for k in keys_to_create])
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
if asdict:
def dict_create(*keys):
d_values = creator(*keys)
return [
d_values.get(key_lookup[k], NO_VALUE)
for k in keys]
def wrap_cache_fn(value):
if value is NO_VALUE:
return False
elif not should_cache_fn:
return True
else:
return should_cache_fn(value)
result = self.get_or_create_multi(
keys, dict_create, timeout, wrap_cache_fn)
result = dict(
(k, v) for k, v in zip(cache_keys, result)
if v is not NO_VALUE)
else:
result = self.get_or_create_multi(
keys, creator, timeout,
should_cache_fn)
return result
def invalidate(*arg):
keys = key_generator(*arg)
self.delete_multi(keys)
def set_(mapping):
keys = list(mapping)
gen_keys = key_generator(*keys)
self.set_multi(dict(
(gen_key, mapping[key])
for gen_key, key
in zip(gen_keys, keys))
)
def get(*arg):
keys = key_generator(*arg)
return self.get_multi(keys)
def refresh(*arg):
keys = key_generator(*arg)
values = fn(*arg)
if asdict:
self.set_multi(
dict(zip(keys, [values[a] for a in arg]))
)
return values
else:
self.set_multi(
dict(zip(keys, values))
)
return values
decorate.set = set_
decorate.invalidate = invalidate
decorate.refresh = refresh
decorate.get = get
return decorate
return decorator
def make_region(*arg, **kw):
"""Instantiate a new :class:`.CacheRegion`.
Currently, :func:`.make_region` is a passthrough
to :class:`.CacheRegion`. See that class for
constructor arguments.
"""
return CacheRegion(*arg, **kw)
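# A minimal usage sketch of the region API defined above. It assumes this module is
# used as part of an installed dogpile.cache package with its standard
# "dogpile.cache.memory" backend registered; the region, namespace and function names
# below are illustrative only.
if __name__ == "__main__":
    region = make_region().configure("dogpile.cache.memory")

    @region.cache_on_arguments(namespace="demo")
    def add(a, b):
        # runs only on a cache miss for this (a, b) combination
        return a + b

    assert add(3, 4) == 7          # computed, then cached under "...:add|demo|3 4"
    assert add(3, 4) == 7          # served from the cache
    add.invalidate(3, 4)           # deletes the cached value for these arguments
    assert add.refresh(3, 4) == 7  # recomputes and re-caches unconditionally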
# [numeric per-file statistics and quality-signal columns for the file above]
# next record: 76d73eb99aeff1e081d5c5783ce96e09453f8979 | 4,046 bytes | py | Python
#   tests/unit/detection/test_detection_notebooks.py | titipakorn/computervision-recipes @ 815435763c0cdce991b7511fd8d39f71c64ccea8 | ["MIT"]
#   stars: 2 (2020-03-03T15:29:50.000Z .. 2022-02-21T12:45:24.000Z) | issues: n/a | forks: 2 (2020-05-06T14:07:00.000Z .. 2022-03-21T19:54:32.000Z)
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# This test is based on the test suite implemented for Recommenders project
# https://github.com/Microsoft/Recommenders/tree/master/tests
import papermill as pm
import pytest
import scrapbook as sb
from utils_cv.common.data import unzip_url
from utils_cv.detection.data import Urls
# Unless manually modified, python3 should be
# the name of the current jupyter kernel
# that runs on the activated conda environment
KERNEL_NAME = "python3"
OUTPUT_NOTEBOOK = "output.ipynb"
@pytest.mark.notebooks
def test_00_notebook_run(detection_notebooks):
notebook_path = detection_notebooks["00"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(PM_VERSION=pm.__version__),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["detection_bounding_box"].data) > 0
@pytest.mark.gpu
@pytest.mark.notebooks
def test_01_notebook_run(detection_notebooks, tiny_od_data_path):
notebook_path = detection_notebooks["01"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
DATA_PATH=tiny_od_data_path,
EPOCHS=1,
IM_SIZE=100,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["training_losses"].data) > 0
training_aps = nb_output.scraps["training_average_precision"].data
assert len(training_aps) > 0
for d in training_aps:
assert isinstance(d, dict)
assert len(set([len(d) for d in training_aps])) == 1
@pytest.mark.gpu
@pytest.mark.notebooks
def test_02_notebook_run(detection_notebooks, tiny_od_mask_data_path):
notebook_path = detection_notebooks["02"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
DATA_PATH=tiny_od_mask_data_path,
EPOCHS=1,
IM_SIZE=100,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["training_losses"].data) > 0
training_aps = nb_output.scraps["training_average_precision"].data
assert len(training_aps) > 0
for d in training_aps:
assert isinstance(d, dict)
assert len(set([len(d) for d in training_aps])) == 1
@pytest.mark.gpu
@pytest.mark.notebooks
def test_03_notebook_run(
detection_notebooks, tiny_od_keypoint_data_path, tmp_session
):
notebook_path = detection_notebooks["03"]
data_path2 = unzip_url(
Urls.fridge_objects_keypoint_top_bottom_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
IM_SIZE=100,
EPOCHS=1,
DATA_PATH=tiny_od_keypoint_data_path,
DATA_PATH2=data_path2,
THRESHOLD=0.01,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["keypoints"].data) == len(
nb_output.scraps["bboxes"].data
)
@pytest.mark.gpu
@pytest.mark.notebooks
def test_12_notebook_run(
detection_notebooks, tiny_od_data_path, tiny_ic_negatives_path
):
notebook_path = detection_notebooks["12"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
DATA_PATH=tiny_od_data_path,
NEG_DATA_PATH=tiny_ic_negatives_path,
EPOCHS=1,
IM_SIZE=100,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["valid_accs"].data) == 1
assert 5 <= len(nb_output.scraps["hard_im_scores"].data) <= 10
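# The detection_notebooks and *_data_path fixtures used above come from the repository's
# conftest, which is not included here. The fixture below is a hypothetical stand-in that
# only illustrates the shape the tests assume -- a mapping from notebook id ("00", "01",
# ...) to a notebook path; the folder and file names are made up.
import os

@pytest.fixture(scope="module")
def detection_notebooks_sketch():
    folder = os.path.join("scenarios", "detection")
    return {
        "00": os.path.join(folder, "00_webcam.ipynb"),
        "01": os.path.join(folder, "01_training_introduction.ipynb"),
    }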
# [numeric per-file statistics and quality-signal columns for the file above]
# next record: 76d787aa0fb3effb59ce8288a064c7de0d40a573 | 524 bytes | py | Python
#   configs/HDR/hdr/retinanet_r50_fpn_1x_coco_hdr_minmax_glob_gamma_2.py | ismailkocdemir/mmdetection @ 4ac7e76dc66be7c97a8ca2c5f8a8e71434e3d823 | ["Apache-2.0"]
#   stars: n/a | issues: n/a | forks: n/a
_base_ = [
'../retinanet_r50_fpn_1x_coco.py',
'../../_base_/datasets/hdr_detection_minmax_glob_gamma.py',
]
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.0005, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None) # dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[10])
runner = dict(
type='EpochBasedRunner', max_epochs=20)
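# A minimal sketch of how a config fragment like this is typically consumed in
# mmdetection 2.x, assuming mmcv is installed. Config.fromfile merges the _base_
# files listed above with the overrides defined here; the path below is this file's
# own location in the repository and the print is only illustrative.
if __name__ == "__main__":
    from mmcv import Config

    cfg = Config.fromfile(
        "configs/HDR/hdr/retinanet_r50_fpn_1x_coco_hdr_minmax_glob_gamma_2.py"
    )
    print(cfg.optimizer, cfg.lr_config, cfg.runner)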
# [numeric per-file statistics and quality-signal columns for the file above]
# next record: 76da4334b5fdeaaf4557e3c74b65d210265f77b8 | 14,585 bytes | py | Python
#   report_writer/report_writer.py | DoubleBridges/door-order-parser @ cd652922006d84a34143ded325e79d141343521d | ["MIT"]
#   stars: n/a | issues: n/a | forks: n/a
from reportlab.lib.units import inch
from reportlab.platypus import SimpleDocTemplate, Spacer
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import inch
from reportlab.platypus.flowables import Flowable
def generate_order(job, path, door_style, doors=[], drawers=[]):
PAGE_HEIGHT = defaultPageSize[1]
PAGE_WIDTH = defaultPageSize[0]
LEFT_MARGIN = 30
LINE_HEIGHT = 18
BACKGROUND_COLOR = (33 / 255, 80 / 255, 156 / 255)
CURSOR_HEIGHT = PAGE_HEIGHT - 60
INPUT_HEIGHT = LINE_HEIGHT - (LINE_HEIGHT * 0.1)
SPECIES = door_style.species
STYLE = door_style.name
INSIDE_PROFILE = door_style.inside_profile
OUTSIDE_PROFILE = door_style.outside_profile
TOTAL_DRS = len(doors)
TOTAL_DWRS = len(drawers)
def myFirstPage(c, doc):
cursor = CURSOR_HEIGHT
c.saveState()
c.setStrokeColorRGB(
BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]
)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.rect(
LEFT_MARGIN, PAGE_HEIGHT - 40, PAGE_WIDTH - (LEFT_MARGIN * 2), 24, fill=1
)
c.setFillColorRGB(1, 1, 1)
c.setFont("Helvetica-Bold", 16)
c.drawCentredString(PAGE_WIDTH / 2.0, PAGE_HEIGHT - 34, "DOOR ORDER FORM")
c.setFont("Helvetica", 12)
c.setFillColorRGB(0, 0, 0)
c.drawString(LEFT_MARGIN, cursor, f"Customer : JS Designs Shop, LLC")
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
f"Order Date : {job.order_date}",
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"PO # : {job.name}-{STYLE}-{SPECIES}")
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, "Delivery Date : ASAP"
)
cursor -= LINE_HEIGHT
c.setFont("Helvetica-Bold", 12)
c.drawString(LEFT_MARGIN, cursor, f"Door Style : {STYLE}")
c.setFont("Helvetica", 12)
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, "Phone : 901-853-7568"
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Panel : ")
c.acroForm.textfield(
x=LEFT_MARGIN + 40,
y=cursor - 4,
name="Panel",
value=" N/A ",
height=INPUT_HEIGHT,
width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 60,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, "Comments : ")
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Wood Type : {SPECIES}")
c.line(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
PAGE_WIDTH - LEFT_MARGIN,
cursor,
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Inside Profile : {INSIDE_PROFILE}")
# c.acroForm.textfield(
# x=LEFT_MARGIN + 78,
# y=cursor - 4,
# name="inside_profile",
# value=" N/A ",
# height=INPUT_HEIGHT,
# width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 98,
# borderWidth=0,
# # fillColor=([1, 1, 1]),
# relative=True,
# )
c.line(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
PAGE_WIDTH - LEFT_MARGIN,
cursor,
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Outside Profile : {OUTSIDE_PROFILE}")
# c.acroForm.textfield(
# x=LEFT_MARGIN + 88,
# y=cursor - 4,
# name="outside_profile",
# value=" N/A ",
# height=INPUT_HEIGHT,
# width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 108,
# borderWidth=0,
# # fillColor=([1, 1, 1]),
# relative=True,
# )
c.line(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
PAGE_WIDTH - LEFT_MARGIN,
cursor,
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Stile/Rails : ")
c.acroForm.textfield(
x=LEFT_MARGIN + 62,
y=cursor - 4,
name="stiles_rails",
value=" N/A ",
height=INPUT_HEIGHT,
width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 82,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
c.setFont("Helvetica-Bold", 12)
c.drawString((PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, f"Drawer Fronts : ")
c.acroForm.textfield(
x=LEFT_MARGIN + 375,
y=cursor - 4,
name="drawer_fronts",
value=" N/A ",
height=INPUT_HEIGHT,
width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 92,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
c.setFont("Helvetica", 12)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Boring For Hinges : No")
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2), cursor, f"Outside Profile : "
)
c.acroForm.textfield(
x=LEFT_MARGIN + 370,
y=cursor - 4,
name="out_profile",
value=" N/A ",
height=INPUT_HEIGHT,
width=(PAGE_WIDTH / 2) - LEFT_MARGIN - (LEFT_MARGIN / 2) - 87,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
cursor -= LINE_HEIGHT
c.drawString(LEFT_MARGIN, cursor, f"Add Hinges : No")
c.drawString(
(PAGE_WIDTH / 2) + (LEFT_MARGIN / 2),
cursor,
f" 5 PC Front: Slab:",
)
c.acroForm.textfield(
x=LEFT_MARGIN + 350,
y=cursor - 4,
name="5_pc_front",
value=" N/A ",
height=INPUT_HEIGHT,
width=30,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
c.acroForm.textfield(
x=LEFT_MARGIN + 430,
y=cursor - 4,
name="slab_front",
value=" N/A ",
height=INPUT_HEIGHT,
width=30,
borderWidth=0,
# fillColor=([1, 1, 1]),
relative=True,
)
cursor -= 12
c.setFont("Times-Italic", 10)
c.drawString(
LEFT_MARGIN,
cursor,
f"Boring not available in arched doors, applied mould doors",
)
cursor -= 10
c.drawString(
LEFT_MARGIN,
cursor,
f"and raised bead profile mitered doors",
)
cursor -= 14
c.setFont("Times-BoldItalic", 12)
c.drawString(
LEFT_MARGIN, cursor, f'Cullman will not bore any door with 2" stiles'
)
cursor -= 20
c.setFont("Helvetica-Bold", 14)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.drawCentredString((PAGE_WIDTH / 4) + 30, cursor, f"Total Doors: {TOTAL_DRS}")
c.drawCentredString(
((PAGE_WIDTH / 4) * 3) + 10, cursor, f"Total Drawer Fronts: {TOTAL_DWRS}"
)
cursor -= 24
c.setStrokeColorRGB(0, 0, 0)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.rect(LEFT_MARGIN + 38, cursor, 60, 20, fill=1)
c.rect(LEFT_MARGIN + 98, cursor, 170, 20, fill=1)
c.rect(LEFT_MARGIN + 308, cursor, 60, 20, fill=1)
c.rect(LEFT_MARGIN + 368, cursor, 170, 20, fill=1)
c.setFont("Helvetica-Bold", 12)
c.setFillColorRGB(1, 1, 1)
string_center = LEFT_MARGIN + 68
c.drawCentredString(string_center, cursor + 5, "Qty")
string_center += 115
c.drawCentredString(string_center, cursor + 5, "Width X Height")
string_center += 155
c.drawCentredString(string_center, cursor + 5, "Qty")
string_center += 115
c.drawCentredString(string_center, cursor + 5, "Width X Height")
c.setFont("Helvetica", 9)
c.setFillColorRGB(0, 0, 0)
c.drawCentredString(
PAGE_WIDTH / 2, 40, f"Page 1 of {job.name}-{STYLE}-{SPECIES}"
)
c.drawCentredString(
PAGE_WIDTH / 2,
20,
'Reminder : Any doors 46" and over in height will automatically receive a horizontal center rail unless otherwise noted.',
)
c.restoreState()
def myLaterPages(c, doc):
cursor = PAGE_HEIGHT - 54
c.saveState()
c.setFont("Helvetica-Bold", 14)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.drawCentredString((PAGE_WIDTH / 4) + 30, cursor, "Doors")
c.drawCentredString(((PAGE_WIDTH / 4) * 3) + 10, cursor, "Drawer Fronts")
cursor -= 24
c.setStrokeColorRGB(0, 0, 0)
c.setFillColorRGB(BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2])
c.rect(LEFT_MARGIN + 38, cursor, 60, 20, fill=1)
c.rect(LEFT_MARGIN + 98, cursor, 170, 20, fill=1)
c.rect(LEFT_MARGIN + 308, cursor, 60, 20, fill=1)
c.rect(LEFT_MARGIN + 368, cursor, 170, 20, fill=1)
c.setFont("Helvetica-Bold", 12)
c.setFillColorRGB(1, 1, 1)
string_center = LEFT_MARGIN + 68
c.drawCentredString(string_center, cursor + 5, "Qty")
string_center += 115
c.drawCentredString(string_center, cursor + 5, "Width X Height")
string_center += 155
c.drawCentredString(string_center, cursor + 5, "Qty")
string_center += 115
c.drawCentredString(string_center, cursor + 5, "Width X Height")
c.setFont("Helvetica", 9)
c.setFillColorRGB(0, 0, 0)
c.drawCentredString(
PAGE_WIDTH / 2, 40, f"Page {doc.page} of {job.name}-{STYLE}-{SPECIES}"
)
c.drawCentredString(
PAGE_WIDTH / 2,
20,
'Reminder : Any doors 46" and over in height will automatically receive a horizontal center rail unless otherwise noted.',
)
c.restoreState()
class OrderEntry(Flowable):
"""Draws table entry for each item in list of door sizes."""
def __init__(
self,
xoffset=0,
height=20,
dr_qty="",
dr_size="",
dwr_qty="",
dwr_size="",
index=0,
):
Flowable.__init__(self)
self.dr_qty = dr_qty
self.dr_size = dr_size
self.dwr_qty = dwr_qty
self.dwr_size = dwr_size
self.index = index
self.height = height
self.idx_box_x = xoffset
self.idx_box_width = 40
self.string_center = xoffset + (self.idx_box_width / 2)
self.qty_box_x = self.idx_box_width + xoffset
self.qty_box_width = 60
self.size_box_x = self.qty_box_width - 10
self.size_box_width = 170
self.second_column_offset = 270
def draw(self):
# Door
self.canv.setStrokeColorRGB(0, 0, 0)
self.canv.setFillColorRGB(
BACKGROUND_COLOR[0], BACKGROUND_COLOR[1], BACKGROUND_COLOR[2]
)
self.canv.rect(self.idx_box_x, 0, self.idx_box_width, self.height, fill=1)
self.canv.setFillColorRGB(1, 1, 1)
self.canv.setFont("Helvetica", 12)
self.canv.drawCentredString(
self.string_center, 0.25 * self.height, str(self.index)
)
self.canv.setFillColorRGB(0, 0, 0)
self.canv.rect(self.qty_box_x, 0, self.qty_box_width, self.height)
self.string_center += (self.idx_box_width / 2) + (self.qty_box_width / 2)
self.canv.drawCentredString(
self.string_center, 0.25 * self.height, self.dr_qty
)
self.canv.rect(self.size_box_x, 0, self.size_box_width, self.height)
self.string_center += (self.qty_box_width / 2) + (self.size_box_width / 2)
self.canv.drawCentredString(
self.string_center, 0.25 * self.height, self.dr_size
)
# Drawer
if self.dwr_qty != "" and self.dwr_size != "":
self.canv.rect(
self.second_column_offset + self.qty_box_x,
0,
self.qty_box_width,
self.height,
)
self.string_center += 155
self.canv.drawCentredString(
self.string_center,
0.25 * self.height,
self.dwr_qty,
)
self.canv.rect(
self.second_column_offset + self.size_box_x,
0,
self.size_box_width,
self.height,
)
self.string_center += (self.qty_box_width / 2) + (
self.size_box_width / 2
)
self.canv.drawCentredString(
self.string_center, 0.25 * self.height, self.dwr_size
)
def build_pdf(path, name, door_list, drawer_list):
doc = SimpleDocTemplate(f"{path}/{name}-{STYLE}.pdf")
Story = [Spacer(1, 3.11 * inch)]
num_of_doors = len(door_list)
num_of_drawers = len(drawer_list)
num_of_entries = max(num_of_doors, num_of_drawers)
for i in range(0, num_of_entries):
try:
door_qty, door_size = door_list[i]["qty"], door_list[i]["size"]
except IndexError:
door_qty, door_size = "", ""
try:
drawer_qty, drawer_size = drawer_list[i]["qty"], drawer_list[i]["size"]
except IndexError:
drawer_qty, drawer_size = "", ""
p = OrderEntry(
xoffset=-50,
dr_qty=door_qty,
dr_size=door_size,
dwr_qty=drawer_qty,
dwr_size=drawer_size,
index=i + 1,
)
Story.append(p)
doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)
build_pdf(path, job.name, doors, drawers)
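# A minimal sketch of calling generate_order, assuming reportlab is installed. The
# attribute names read from job (name, order_date) and door_style (name, species,
# inside_profile, outside_profile) are the ones used above; the stand-in objects and
# sizes below are hypothetical test data.
if __name__ == "__main__":
    from types import SimpleNamespace

    job = SimpleNamespace(name="JOB-001", order_date="2021-01-15")
    door_style = SimpleNamespace(
        name="Shaker",
        species="Maple",
        inside_profile="Square",
        outside_profile="Square",
    )
    doors = [{"qty": "2", "size": '15 3/4" x 30"'}]
    drawers = [{"qty": "1", "size": '15 3/4" x 6"'}]
    # writes ./JOB-001-Shaker.pdf
    generate_order(job, ".", door_style, doors=doors, drawers=drawers)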
# [numeric per-file statistics and quality-signal columns for the file above]
# next record: 76dbfabe1368ceb4eba242e1e280877abf784832 | 12,063 bytes | py | Python
#   colosseum/mdps/minigrid_doorkey/minigrid_doorkey.py | MichelangeloConserva/Colosseum @ b0711fd9ce75520deb74cda75c148984a8e4152f | ["MIT"]
#   stars: n/a | issues: n/a | forks: n/a
from copy import deepcopy
from dataclasses import asdict, dataclass
from enum import IntEnum
from colosseum.utils.random_vars import deterministic, get_dist
try:
from functools import cached_property
except:
from backports.cached_property import cached_property
from typing import Any, Dict, List, Tuple, Type, Union
import numpy as np
from scipy.stats import beta, rv_continuous
from colosseum.mdps import MDP
from colosseum.mdps.base_mdp import NextStateSampler
from colosseum.mdps.minigrid_rooms.continuous.mdp import MiniGridRoomsContinuous
from colosseum.utils.mdps import check_distributions
class MiniGridDoorKeyAction(IntEnum):
"""The action available in the MiniGridDoorKey MDP."""
MoveForward = 0
TurnRight = 1
TurnLeft = 2
PickObject = 3
DropObject = 4
UseObject = 5
class MiniGridDoorKeyDirection(IntEnum):
"""The possible agent direction in the MiniGridDoorKey MDP."""
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
@dataclass(frozen=True)
class MiniGridDoorKeyNode:
X: int
Y: int
Dir: MiniGridDoorKeyDirection
XKey: int
YKey: int
IsDoorOpened: bool
def __str__(self):
return f"X={self.X},Y={self.Y},Dir={MiniGridDoorKeyDirection(self.Dir).name},XKey={self.XKey},YKey={self.YKey},IsDoorOpened{self.IsDoorOpened}"
class MiniGridDoorKeyMDP(MDP):
@staticmethod
def testing_parameters() -> Dict[str, Tuple]:
t_params = MDP.testing_parameters()
t_params["size"] = (3, 5, 7)
t_params["make_reward_stochastic"] = (True, False)
t_params["n_starting_states"] = (1, 4)
return t_params
@staticmethod
def get_node_class() -> Type[MiniGridDoorKeyNode]:
return MiniGridDoorKeyNode
def __init__(
self,
seed: int,
size: int,
randomize_actions: bool = True,
lazy: float = None,
make_reward_stochastic=False,
n_starting_states: int = 2,
optimal_distribution: Union[Tuple, rv_continuous] = None,
other_distribution: Union[Tuple, rv_continuous] = None,
**kwargs,
):
"""
Parameters
----------
seed : int
the seed used for sampling rewards and next states.
randomize_actions : bool, optional
whether the effect of the actions changes for every node. It is particularly important to set this value to
true when doing experiments to avoid immediately reaching highly rewarding states in some MDPs by just
selecting the same action repeatedly. By default, it is set to true.
lazy : float
the probability of an action not producing any effect on the MDP.
size : int
the size of the grid.
make_reward_stochastic : bool, optional
checks whether the rewards are to be made stochastic. By default, it is set to False.
n_starting_states : int, optional
the number of states in the starting distribution. By default, it is set to two.
optimal_distribution : Union[Tuple, rv_continuous], optional
The distribution of the highly rewarding state. It can be either passed as a tuple containing Beta parameters
or as a rv_continuous object.
other_distribution : Union[Tuple, rv_continuous]
The distribution of the non highly rewarding states. It can be either passed as a tuple containing Beta parameters
or as a rv_continuous object.
"""
if type(optimal_distribution) == tuple:
optimal_distribution = get_dist(
optimal_distribution[0], optimal_distribution[1:]
)
if type(other_distribution) == tuple:
other_distribution = get_dist(other_distribution[0], other_distribution[1:])
self.n_starting_states = n_starting_states
self.size = size
self.make_reward_stochastic = make_reward_stochastic
dists = [
optimal_distribution,
other_distribution,
]
if dists.count(None) == 0:
self.optimal_distribution = optimal_distribution
self.other_distribution = other_distribution
else:
if make_reward_stochastic:
self.other_distribution = beta(1, size ** 2 - 1)
self.optimal_distribution = beta(size ** 2 - 1, 1)
else:
self.optimal_distribution = deterministic(1.0)
self.other_distribution = deterministic(0.0)
super().__init__(
seed=seed,
randomize_actions=randomize_actions,
lazy=lazy,
**kwargs,
)
@property
def parameters(self) -> Dict[str, Any]:
return {
**super(MiniGridDoorKeyMDP, self).parameters,
**dict(
size=self.size,
n_starting_states=self.n_starting_states,
optimal_distribution=self.optimal_distribution,
other_distribution=self.other_distribution,
),
}
@property
def possible_starting_nodes(self) -> List[MiniGridDoorKeyNode]:
return self._possible_starting_nodes
@cached_property
def coordinates_available(self):
coords = (
MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0))
.ravel()
.tolist()
)
for i in range(self.size):
if self.is_wall_horizontal:
coords.remove((i, self.wall_position))
else:
coords.remove((self.wall_position, i))
return tuple(coords)
@property
def num_actions(self):
return len(MiniGridDoorKeyAction)
def _calculate_next_nodes_prms(
self, node: MiniGridDoorKeyNode, action: int
) -> Tuple[Tuple[dict, float], ...]:
newnode_prms = deepcopy(asdict(node))
if action == MiniGridDoorKeyAction.TurnRight:
newnode_prms["Dir"] = (node.Dir + 1) % 4
if action == MiniGridDoorKeyAction.TurnLeft:
newnode_prms["Dir"] = (node.Dir - 1) % 4
if action == MiniGridDoorKeyAction.MoveForward:
if node.Dir == MiniGridDoorKeyDirection.UP:
next_coord = (node.X, node.Y + 1)
if node.Dir == MiniGridDoorKeyDirection.RIGHT:
next_coord = node.X + 1, node.Y
if node.Dir == MiniGridDoorKeyDirection.DOWN:
next_coord = node.X, node.Y - 1
if node.Dir == MiniGridDoorKeyDirection.LEFT:
next_coord = node.X - 1, node.Y
if next_coord in self.coordinates_available or (
node.IsDoorOpened and next_coord == self.door_position
):
newnode_prms["X"], newnode_prms["Y"] = next_coord
if action == MiniGridDoorKeyAction.PickObject:
if node.X == node.XKey and node.Y == node.YKey:
newnode_prms["XKey"] = newnode_prms["YKey"] = -1
if node.XKey == -1 and not node.IsDoorOpened:
if action == MiniGridDoorKeyAction.DropObject:
newnode_prms["XKey"] = node.X
newnode_prms["YKey"] = node.Y
if action == MiniGridDoorKeyAction.UseObject:
if node.Dir == MiniGridDoorKeyDirection.UP:
next_coord = (node.X, node.Y + 1)
if node.Dir == MiniGridDoorKeyDirection.RIGHT:
next_coord = node.X + 1, node.Y
if node.Dir == MiniGridDoorKeyDirection.DOWN:
next_coord = node.X, node.Y - 1
if node.Dir == MiniGridDoorKeyDirection.LEFT:
next_coord = node.X - 1, node.Y
if next_coord == self.door_position:
newnode_prms["IsDoorOpened"] = True
return ((newnode_prms, 1.0),)
def _calculate_reward_distribution(
self, node: Any, action: IntEnum, next_node: Any
) -> rv_continuous:
return (
self.optimal_distribution
if next_node.X == self.goal_position[0]
and next_node.Y == self.goal_position[1]
else self.other_distribution
)
def _check_input_parameters(self):
super(MiniGridDoorKeyMDP, self)._check_input_parameters()
assert self.size >= 3
check_distributions(
[
self.optimal_distribution,
self.other_distribution,
],
self.make_reward_stochastic,
)
def _instantiate_starting_node_sampler(self) -> NextStateSampler:
# noinspection PyAttributeOutsideInit
self.wall_position = self._rng.randint(self.size - 2) + 1
# noinspection PyAttributeOutsideInit
self.is_wall_horizontal = self._rng.rand() > 0.5
if self.is_wall_horizontal:
self.door_position = self._rng.randint(self.size), self.wall_position
else:
self.door_position = self.wall_position, self._rng.randint(self.size)
self.is_goal_before = self._rng.rand() > 0.5
coords = MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0))
goal_positions = []
starting_positions = []
for i, j in coords.ravel():
if (
i < self.wall_position
if self.is_goal_before
else i > self.wall_position
):
goal_positions.append((j, i) if self.is_wall_horizontal else (i, j))
elif (
i > self.wall_position
if self.is_goal_before
else i < self.wall_position
):
starting_positions.append((j, i) if self.is_wall_horizontal else (i, j))
possible_starting_positions = deepcopy(starting_positions)
self._rng.shuffle(goal_positions)
self.goal_position = goal_positions[0]
self._rng.shuffle(starting_positions)
self.start_key_position = starting_positions.pop(0)
starting_positions = [
(x, y, dir)
for x, y in starting_positions
for dir in MiniGridDoorKeyDirection
]
assert self.n_starting_states < len(starting_positions)
self._possible_starting_nodes = [
MiniGridDoorKeyNode(
x,
y,
dir.value,
*self.start_key_position,
False,
)
for x, y, dir in starting_positions
]
return NextStateSampler(
next_states=self._possible_starting_nodes[: self.n_starting_states],
probs=[1 / self.n_starting_states for _ in range(self.n_starting_states)],
seed=self._next_seed(),
)
def calc_grid_repr(self, node: Any) -> np.array:
grid_size = self.size
door_position = self.door_position
wall_position = self.wall_position
is_wall_horizontal = self.is_wall_horizontal
grid = np.zeros((grid_size, grid_size), dtype=str)
grid[:, :] = " "
grid[self.goal_position[1], self.goal_position[0]] = "G"
if self.cur_node.XKey != -1:
grid[self.cur_node.YKey, self.cur_node.XKey] = "K"
for i in range(grid_size):
if not is_wall_horizontal:
grid[i, wall_position] = "W_en"
else:
grid[wall_position, i] = "W_en"
grid[door_position[1], door_position[0]] = (
"O" if self.cur_node.IsDoorOpened else "C"
)
if self.cur_node.Dir == MiniGridDoorKeyDirection.UP:
grid[self.cur_node.Y, self.cur_node.X] = "^"
elif self.cur_node.Dir == MiniGridDoorKeyDirection.RIGHT:
grid[self.cur_node.Y, self.cur_node.X] = ">"
elif self.cur_node.Dir == MiniGridDoorKeyDirection.DOWN:
grid[self.cur_node.Y, self.cur_node.X] = "v"
elif self.cur_node.Dir == MiniGridDoorKeyDirection.LEFT:
grid[self.cur_node.Y, self.cur_node.X] = "<"
return grid[::-1, :]
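# A hedged instantiation sketch: the keyword arguments below are the ones exposed by
# __init__ above, but the colosseum base MDP class may require additional parameters
# that are not visible in this file, so treat this strictly as illustrative.
if __name__ == "__main__":
    mdp = MiniGridDoorKeyMDP(
        seed=42,
        size=5,                  # grid side length, must be >= 3
        n_starting_states=1,
        make_reward_stochastic=False,
    )
    # calc_grid_repr renders a node as an ASCII-style grid: "G" goal, "K" key,
    # "W_en" wall, "O"/"C" open/closed door, and an arrow for the agent heading
    print(mdp.calc_grid_repr(mdp.cur_node))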
# [numeric per-file statistics and quality-signal columns for the file above]
# next record: 76de48d1a553599d42928e5621ab909ebe023773 | 1,276 bytes | py | Python
#   scripts/senate_crawler.py | tompsh/tompsh.github.io @ 3283ee2de46730adf14ef4f6bd2963b345500562 | ["BSD-2-Clause"]
#   stars: n/a | issues: n/a | forks: n/a
from bs4 import BeautifulSoup
import logging
import pandas as pd
import csv
import re
import requests
from urllib.parse import urljoin
logging.basicConfig(format="%(asctime)s %(levelname)s:%(message)s", level=logging.INFO)
def get_html(url):
return requests.get(url).text
class SenateCrawler:
def __init__(self):
self.base_url = "https://www25.senado.leg.br/"
self.search_url = self.base_url + "web/senadores/em-exercicio/-/e/por-nome"
self.senate = []
def get_senate(self, url):
soup = BeautifulSoup(get_html(self.search_url), "html.parser")
trs = soup.find("table").find("tbody").find_all("tr")
for tr in trs:
cells = tr.find_all("td")
senateperson = {
"name": cells[0].get_text(),
"party": cells[1].get_text(),
"email": cells[5].get_text(),
}
if senateperson["email"]:
self.senate.append(senateperson)
def run(self):
try:
self.get_senate(self.search_url)
except Exception:
logging.exception("global failure")
finally:
df = pd.DataFrame(self.senate)
df.to_csv("senate.csv")
logging.info("program exited")
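# Minimal entry point: running the crawler end to end scrapes the senate roster and,
# as implemented in run() above, writes senate.csv to the current directory.
if __name__ == "__main__":
    SenateCrawler().run()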
# [numeric per-file statistics and quality-signal columns for the file above]
# next record: 76ded3c51388324a8e665394e6561d69d52c808d | 6,101 bytes | py | Python
#   laceworksdk/api/container_registries.py | kiddinn/python-sdk @ 23a33313f97337fddea155bcb19c8d5270fc8013 | ["MIT"]
#   stars: 10 (2021-03-20T18:12:16.000Z .. 2022-02-14T21:33:23.000Z) | issues: 10 (2021-02-22T23:31:32.000Z .. 2022-03-25T14:11:27.000Z) | forks: 7 (2021-06-18T18:17:12.000Z .. 2022-03-25T13:52:14.000Z)
# -*- coding: utf-8 -*-
"""
Lacework Container Registries API wrapper.
"""
import logging
logger = logging.getLogger(__name__)
class ContainerRegistriesAPI(object):
"""
Lacework Container Registries API.
"""
def __init__(self, session):
"""
Initializes the ContainerRegistriesAPI object.
:param session: An instance of the HttpSession class
:return ContainerRegistriesAPI object.
"""
super(ContainerRegistriesAPI, self).__init__()
self._session = session
def create(self,
name,
type,
enabled,
data,
org=False):
"""
A method to create a new container registry.
:param name: A string representing the container registry name.
:param type: A string representing the container registry type.
:param enabled: A boolean/integer representing whether the container registry is enabled.
(0 or 1)
:param data: A JSON object matching the schema for the specified type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Creating container registry in Lacework...")
# Build the Container Registries request URI
api_uri = "/api/v2/ContainerRegistries"
data = {
"name": name,
"type": type,
"enabled": int(bool(enabled)),
"data": data
}
response = self._session.post(api_uri, org=org, data=data)
return response.json()
def get(self,
guid=None,
type=None,
org=False):
"""
A method to get all container registries.
:param guid: A string representing the container registry GUID.
:param type: A string representing the container registry type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Getting container registry info from Lacework...")
# Build the Container Registries request URI
if guid:
api_uri = f"/api/v2/ContainerRegistries/{guid}"
elif type:
api_uri = f"/api/v2/ContainerRegistries/{type}"
else:
api_uri = "/api/v2/ContainerRegistries"
response = self._session.get(api_uri, org=org)
return response.json()
def get_by_type(self,
type,
org=False):
"""
A method to get all container registries by type.
:param type: A string representing the container registry type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
return self.get(type=type, org=org)
def get_by_guid(self,
guid,
org=False):
"""
A method to get all container registries.
:param guid: A string representing the container registry GUID.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
return self.get(guid=guid, org=org)
def search(self,
query_data=None,
org=False):
"""
A method to search container registries.
:param query_data: A dictionary containing the desired search parameters.
(filters, returns)
:return response json
"""
logger.info("Searching container registries from Lacework...")
# Build the Container Registries request URI
api_uri = "/api/v2/ContainerRegistries/search"
response = self._session.post(api_uri, data=query_data, org=org)
return response.json()
def update(self,
guid,
name=None,
type=None,
enabled=None,
data=None,
org=False):
"""
        A method to update a container registry.
:param guid: A string representing the container registry GUID.
:param name: A string representing the container registry name.
:param type: A string representing the container registry type.
:param enabled: A boolean/integer representing whether the container registry is enabled.
(0 or 1)
:param data: A JSON object matching the schema for the specified type.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Updating container registry in Lacework...")
# Build the Container Registries request URI
api_uri = f"/api/v2/ContainerRegistries/{guid}"
tmp_data = {}
if name:
tmp_data["name"] = name
if type:
tmp_data["type"] = type
if enabled is not None:
tmp_data["enabled"] = int(bool(enabled))
if data:
tmp_data["data"] = data
response = self._session.patch(api_uri, org=org, data=tmp_data)
return response.json()
def delete(self,
guid,
org=False):
"""
        A method to delete a container registry.
:param guid: A string representing the container registry GUID.
:param org: A boolean representing whether the request should be performed
at the Organization level
:return response json
"""
logger.info("Deleting container registry in Lacework...")
# Build the Container Registries request URI
api_uri = f"/api/v2/ContainerRegistries/{guid}"
response = self._session.delete(api_uri, org=org)
if response.status_code == 204:
return response
else:
return response.json()
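# A hedged usage sketch: ContainerRegistriesAPI only needs an object exposing the
# HttpSession-style get/post/patch/delete methods used above (the laceworksdk client
# normally supplies this). The stub session below is hypothetical, as is the
# "GCP_GCR" registry type value.
if __name__ == "__main__":
    class _FakeResponse:
        def json(self):
            return {"data": []}

    class _FakeSession:
        """Hypothetical stand-in implementing only the get() call used below."""
        def get(self, uri, org=False):
            print("GET", uri)
            return _FakeResponse()

    registries = ContainerRegistriesAPI(_FakeSession())
    registries.get()                   # GET /api/v2/ContainerRegistries
    registries.get_by_type("GCP_GCR")  # GET /api/v2/ContainerRegistries/GCP_GCR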
# [numeric per-file statistics and quality-signal columns for the file above]
# next record: 76dff496b7787e808a82fccd90d499cb2d9e785d | 1,994 bytes | py | Python
#   tests/flows/test_consent.py | mrkday/SATOSA @ 43fd13273d7633b1d496d9c9aaef97c472ebd448 | ["Apache-2.0"]
#   stars: 92 (2017-11-08T08:01:27.000Z .. 2022-03-14T09:44:09.000Z) | issues: 155 (2017-10-31T15:11:06.000Z .. 2022-03-11T16:59:23.000Z) | forks: 73 (2017-11-05T13:53:40.000Z .. 2022-03-23T15:34:00.000Z)
import json
import re
import responses
from werkzeug.test import Client
from werkzeug.wrappers import Response
from satosa.proxy_server import make_app
from satosa.satosa_config import SATOSAConfig
class TestConsent:
def test_full_flow(self, satosa_config_dict, consent_module_config):
api_url = "https://consent.example.com/api"
redirect_url = "https://consent.example.com/redirect"
consent_module_config["config"]["api_url"] = api_url
consent_module_config["config"]["redirect_url"] = redirect_url
satosa_config_dict["MICRO_SERVICES"].append(consent_module_config)
# application
test_client = Client(make_app(SATOSAConfig(satosa_config_dict)), Response)
# incoming auth req
http_resp = test_client.get("/{}/{}/request".format(satosa_config_dict["BACKEND_MODULES"][0]["name"],
satosa_config_dict["FRONTEND_MODULES"][0]["name"]))
assert http_resp.status_code == 200
verify_url_re = re.compile(r"{}/verify/\w+".format(api_url))
with responses.RequestsMock() as rsps:
# fake no previous consent
consent_request_url_re = re.compile(r"{}/creq/\w+".format(api_url))
rsps.add(responses.GET, verify_url_re, status=401)
rsps.add(responses.GET, consent_request_url_re, "test_ticket", status=200)
# incoming auth resp
http_resp = test_client.get("/{}/response".format(satosa_config_dict["BACKEND_MODULES"][0]["name"]))
assert http_resp.status_code == 302
assert http_resp.headers["Location"].startswith(redirect_url)
with responses.RequestsMock() as rsps:
# fake consent
rsps.add(responses.GET, verify_url_re, json.dumps({"foo": "bar"}), status=200)
# incoming consent response
http_resp = test_client.get("/consent/handle_consent")
assert http_resp.status_code == 200
# [numeric per-file statistics and quality-signal columns for the file above]
# next record: 76e06c68d3769fb919b634d12c79af9d79a056b9 | 18,072 bytes | py | Python
#   qnarre/models/transfo_xl.py | quantapix/qnarre.com @ f51d5945c20ef8182c4aa11f1b407d064c190c70 | ["MIT"]
#   stars: n/a | issues: n/a | forks: n/a
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# https://arxiv.org/abs/1901.02860
# https://github.com/kimiyoung/transformer-xl
import torch
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import forward as qf
from ..core import output as qo
from ..core.embed import Adaptive, Positional
from ..core.ffnet import Positionwise
from ..prep.config.transfo_xl import PreTrained
log = logging.get_logger(__name__)
class Model(PreTrained):
def __init__(self, **kw):
super().__init__(**kw)
cfg = self.get_cfg(kw)
self.tok_emb = Adaptive(cfg.cutoffs, div_val=cfg.div_val, **kw)
self.pos_emb = Positional(cfg.d_model, **kw)
if cfg.untie_r:
q_bias = None
r_bias = None
else:
q_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head))
r_bias = nn.Parameter(torch.FloatTensor(cfg.n_heads, cfg.d_head))
self.lays = qc.Stack()
for _ in range(cfg.n_lays):
self.lays.append(Layer(q_bias=q_bias, r_bias=r_bias, **kw))
self.drop = qc.Dropout(cfg.drop, **kw)
def init_mems(self, b):
cfg = self.cfg
if cfg.mem_len > 0:
p = next(self.parameters())
kw = dict(dtype=p.dtype, device=p.device)
return [torch.zeros(cfg.mem_len, b, cfg.d_model, **kw) for _ in range(cfg.n_lays)]
return None
def update_mems(self, xs, ys, mlen, qlen):
assert len(xs) == len(ys)
e = mlen + max(0, qlen)
b = max(0, e - self.cfg.mem_len)
with torch.no_grad():
return [torch.cat([ys[i], xs[i]], dim=0)[b:e].detach() for i in range(len(xs))]
def forward(self, x, mems=None, head_m=None, x_emb=None, **kw):
cfg = self.cfg
yo = self.get_y_opts(**kw)
if x is None:
x_emb = x_emb.transpose(0, 1).contiguous()
s = x_emb.size()[:-1]
else:
assert x_emb is None
x = x.transpose(0, 1).contiguous()
s = x.size()
y = self.tok_emb(x) if x_emb is None else x_emb
n, b = s
if mems is None:
mems = self.init_mems(b)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + n
pos = torch.arange(klen - 1, -1, -1.0, device=y.device, dtype=y.dtype)
if cfg.clamp_len > 0:
pos.clamp_(max=cfg.clamp_len)
pos = self.drop(self.pos_emb(pos))
ones = y.new_ones((n, klen), dtype=torch.uint8)
if cfg.same_length:
d = klen - cfg.mem_len
shift = n - d if d > 0 else n
dec_m = (torch.triu(ones, 1 + mlen) + torch.tril(ones, -shift))[:, :, None]
else:
dec_m = torch.triu(ones, diagonal=1 + mlen)[:, :, None]
y = self.drop(y)
attns = () if yo.attn else None
hiddens = () if yo.hidden else None
head_m = self.get_head_m2(head_m, cfg.n_lays)
for i, lay in enumerate(self.lays):
if yo.hidden:
hiddens += (y,)
m = None if mems is None else mems[i]
ys = lay(y, pos, **kw, dec_m=dec_m, head_m=head_m[i], mems=m, yo=yo)
y = ys[0]
if yo.attn:
attns += (ys[1],)
y = self.drop(y)
mems = None if mems is None else self.update_mems(hiddens, mems, mlen, n)
if yo.attn:
attns = tuple(x.permute(2, 3, 0, 1).contiguous() for x in attns)
if yo.hidden:
hiddens += (y,)
hiddens = tuple(x.transpose(0, 1).contiguous() for x in hiddens)
y = y.transpose(0, 1).contiguous()
ys = (y, attns, hiddens, mems)
return qo.WithMems(*ys) if yo.kw else ys
class ForSeqClassifier(PreTrained):
def __init__(self, **kw):
super().__init__(**kw)
cfg = self.get_cfg(kw)
self.model = Model(**kw)
self.proj = qc.Linear(cfg.d_embed, cfg.n_labels, bias=False, **kw)
forward = qf.forward_seq
def post_proj(self, x):
cfg = self.cfg
b = (x.shape[:2] if x is not None else x_emb.shape[:2])[0]
if cfg.PAD is None:
n = -1
else:
assert b == 1
n = -1 if x is None else torch.ne(x, cfg.PAD).sum(-1) - 1
return x[torch.arange(b, device=self.device), n]
class LLMHead(PreTrained):
def __init__(self, **kw):
super().__init__(**kw)
cfg = self.get_cfg(kw)
self.model = Model(**kw)
assert cfg.sample_softmax <= 0
self.proj = Projector(
cfg.s_vocab, cfg.d_embed, cfg.d_model, cfg.cutoffs, div_val=cfg.div_val, **kw
)
def tie_weights(self):
cfg = self.cfg
if cfg.tie_word_embeds:
for i in range(len(self.proj.out_layers)):
self._tie_or_clone_weights(self.proj.out_layers[i], self.model.tok_emb.lays[i])
if cfg.tie_projs:
for i, tie_proj in enumerate(cfg.tie_projs):
if tie_proj and cfg.div_val == 1 and cfg.d_model != cfg.d_embed:
if cfg.torchscript:
self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[0].clone())
else:
self.proj.out_projs[i] = self.model.tok_emb.projs[0]
elif tie_proj and cfg.div_val != 1:
if cfg.torchscript:
self.proj.out_projs[i] = nn.Parameter(self.model.tok_emb.projs[i].clone())
else:
self.proj.out_projs[i] = self.model.tok_emb.projs[i]
def init_mems(self, bsz):
return self.model.init_mems(bsz)
def forward(self, x, x_emb=None, labels=None, **kw):
yo = self.get_y_opts(**kw)
if x is None:
assert x_emb is not None
b, tgt = x_emb.size(0), x_emb.size(1)
else:
b, tgt = x.size(0), x.size(1)
ys = self.model(x, x_emb=x_emb, **kw, yo=yo)
xs = self.proj(ys[0][:, -tgt:], labels)
y = xs.view(b, tgt, -1) if labels is None else ()
loss = xs.view(b, tgt - 1) if labels is not None else None
ys = (y,) + ys[1:] + (loss,)
return qo.LossMems(*ys) if yo.kw else ys
class Projector(qc.Module):
def __init__(self, s_vocab, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
super().__init__()
self.s_vocab = s_vocab
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [s_vocab]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
self.out_layers = qc.Stack()
self.out_projs = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
else:
self.out_projs.append(None)
self.out_layers.append(qc.Linear(d_embed, s_vocab))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
self.out_layers.append(qc.Linear(d_emb_i, r_idx - l_idx))
self.keep_order = keep_order
def _compute_logit(self, x, weight, bias, proj):
if proj is None:
y = F.linear(x, weight, bias=bias)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
x = F.linear(x, proj.t().contiguous())
y = F.linear(x, weight, bias=bias)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return y
def forward(self, x, labels=None, keep_order=False):
if labels is not None:
x = x[..., :-1, :].contiguous()
labels = labels[..., 1:].contiguous()
x = x.view(-1, x.size(-1))
labels = labels.view(-1)
assert x.size(0) == labels.size(0)
else:
x = x.view(-1, x.size(-1))
if self.n_clusters == 0:
y = self._compute_logit(
x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]
)
if labels is not None:
y = -F.log_softmax(y, dim=-1).gather(1, labels.unsqueeze(1)).squeeze(1)
else:
y = F.log_softmax(y, dim=-1)
else:
ws, bs = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
ws.append(weight_i)
bs.append(bias_i)
head_weight, head_bias, head_proj = ws[0], bs[0], self.out_projs[0]
head_logit = self._compute_logit(x, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
if labels is None:
y = x.new_empty((head_logit.size(0), self.s_vocab))
else:
y = torch.zeros_like(labels, dtype=x.dtype, device=x.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
mask_i = (labels >= l_idx) & (labels < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
target_i = labels.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
hidden_i = x.index_select(0, indices_i)
else:
hidden_i = x
if i == 0:
if labels is not None:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
y[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = ws[i], bs[i], self.out_projs[i]
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
cluster_prob_idx = self.cutoffs[0] + i - 1
if labels is not None:
logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1, target_i[:, None]
).squeeze(1)
else:
logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
y[:, l_idx:r_idx] = logprob_i
if labels is not None:
if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
y.index_copy_(0, indices_i, -logprob_i)
else:
y[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return y
def log_prob(self, x):
if self.n_clusters == 0:
y = self._compute_logit(
x, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]
)
return F.log_softmax(y, dim=-1)
else:
ws, bs = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
ws.append(weight_i)
bs.append(bias_i)
head_weight, head_bias, head_proj = ws[0], bs[0], self.out_projs[0]
head_logit = self._compute_logit(x, head_weight, head_bias, head_proj)
y = x.new_empty((head_logit.size(0), self.s_vocab))
head_logprob = F.log_softmax(head_logit, dim=1)
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
beg_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
y[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = ws[i], bs[i], self.out_projs[i]
tail_logit_i = self._compute_logit(x, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
logprob_i = head_logprob[:, self.cutoffs[0] + i - 1, None] + tail_logprob_i
y[:, beg_idx:stop_idx] = logprob_i
return y
class Layer(qc.Module):
def __init__(self, **kw):
super().__init__()
self.attn = Attention(**kw)
self.ff = Positionwise(**kw)
def forward(self, x, r, dec_m=None, **kw):
ys = self.attn(x, r, mask=dec_m, **kw)
return (self.ff(ys[0]),) + ys[1:]
class Attention(qc.Module):
hs = qc.Hypers(
{"d_head", "d_model", "drop", "n_heads"},
{"drop_attn": 0.0, "eps": 1e-5, "pre_norm": False},
)
def __init__(self, r_bias=None, q_bias=None, ps={}, hs=[], **kw):
super().__init__(ps, [self.hs] + hs, **kw)
cfg = self.get_cfg(kw)
m, n, h = cfg.d_model, cfg.n_heads, cfg.d_head
cfg.scale = 1 / (h**0.5)
self.qkv = qc.Linear(m, 3 * n * h, bias=False)
self.r_net = qc.Linear(m, n * h, bias=False)
if r_bias is None or q_bias is None:
self.q_bias = nn.Parameter(torch.FloatTensor(n, h))
self.r_bias = nn.Parameter(torch.FloatTensor(n, h))
else:
self.q_bias = q_bias
self.r_bias = r_bias
self.drop = qc.Dropout(cfg.drop, **kw)
self.drop_attn = qc.Dropout(cfg.drop_attn, **kw)
self.proj = qc.Linear(n * h, m, bias=False, **kw)
self.norm = qc.LayerNorm(m, **kw)
def rel_shift(self, x, zero_triu=False):
s = (x.size(0), 1) + x.size()[2:]
y = torch.zeros(s, device=x.device, dtype=x.dtype)
y = torch.cat([y, x], dim=1)
s = (x.size(1) + 1, x.size(0)) + x.size()[2:]
y = y.view(*s)
y = y[1:].view_as(x)
if zero_triu:
ones = torch.ones((y.size(0), y.size(1)))
y = y * torch.tril(ones, y.size(1) - y.size(0))[:, :, None, None]
return y
def forward(self, x, r, mask=None, mems=None, head_m=None, **kw):
cfg = self.cfg
yo = self.get_y_opts(**kw)
y = x if mems is None else torch.cat([mems, x], 0)
y = self.qkv(self.norm(y) if cfg.pre_norm else y)
r = self.r_net(r)
q, k, v = torch.chunk(y, 3, dim=-1)
qlen, klen, rlen = x.size(0), k.size(0), r.size(0)
q = q if mems is None else q[-qlen:]
b, n, h = x.size(1), cfg.n_heads, cfg.d_head
q = q.view(qlen, b, n, h)
k = k.view(klen, b, n, h)
v = v.view(klen, b, n, h)
r = r.view(rlen, n, h)
AC = torch.einsum("ibnd,jbnd->ijbn", (q + self.q_bias, k))
BD = self.rel_shift(torch.einsum("ibnd,jnd->ijbn", (q + self.r_bias, r)))
a = AC + BD
a.mul_(cfg.scale)
if mask is not None and torch.sum(mask).item():
mask = mask == 1
i = self.get_minus_inf()
if mask.dim() == 2:
a = a.float().masked_fill(mask[None, :, :, None], i).type_as(a)
elif mask.dim() == 3:
a = a.float().masked_fill(mask[:, :, :, None], i).type_as(a)
a = self.drop_attn(F.softmax(a, dim=1))
if head_m is not None:
a = a * head_m
y = torch.einsum("ijbn,jbnd->ibnd", (a, v))
y = y.contiguous().view(y.size(0), y.size(1), n * h)
y = x + self.drop(self.proj(y))
ys = (y,) if cfg.pre_norm else (self.norm(y),)
if yo.attn:
ys += (a,)
return ys
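# Minimal, self-contained sketch of the relative-position shift performed by rel_shift above,
# written against plain PyTorch only; the toy (qlen, klen, batch, heads) shape below is an
# assumption chosen purely for illustration, not part of the original module.
import torch

def rel_shift_sketch(x):
    # Prepend a zero column, fold the matrix, drop the padding row: each query row's
    # relative scores end up shifted into absolute-position alignment.
    pad = torch.zeros((x.size(0), 1) + x.size()[2:], device=x.device, dtype=x.dtype)
    y = torch.cat([pad, x], dim=1)
    y = y.view((x.size(1) + 1, x.size(0)) + x.size()[2:])
    return y[1:].view_as(x)

if __name__ == "__main__":
    scores = torch.arange(6, dtype=torch.float32).view(2, 3, 1, 1)
    print(rel_shift_sketch(scores).squeeze())  # rows shift by their query index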
| 42.224299
| 98
| 0.529714
| 2,664
| 18,072
| 3.412538
| 0.116742
| 0.01375
| 0.02145
| 0.00792
| 0.452315
| 0.390606
| 0.348367
| 0.305687
| 0.277857
| 0.263117
| 0
| 0.015772
| 0.333389
| 18,072
| 427
| 99
| 42.323185
| 0.738856
| 0.049801
| 0
| 0.349333
| 0
| 0
| 0.005714
| 0
| 0
| 0
| 0
| 0
| 0.016
| 1
| 0.050667
| false
| 0
| 0.029333
| 0.002667
| 0.138667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76e0ef3752aa275816b6ecc85b1a2c5f0647c59d
| 3,429
|
py
|
Python
|
src/align/face_align_celeba.py
|
Dou-Yu-xuan/pykinship
|
f81f6667fa08a08fe726736d05476168b2a3e2f0
|
[
"MIT"
] | 12
|
2020-02-19T02:50:49.000Z
|
2022-03-31T19:39:35.000Z
|
src/align/face_align_celeba.py
|
Dou-Yu-xuan/pykinship
|
f81f6667fa08a08fe726736d05476168b2a3e2f0
|
[
"MIT"
] | 68
|
2020-03-23T00:07:28.000Z
|
2022-03-28T10:02:16.000Z
|
src/align/face_align_celeba.py
|
Dou-Yu-xuan/pykinship
|
f81f6667fa08a08fe726736d05476168b2a3e2f0
|
[
"MIT"
] | 3
|
2020-02-11T19:07:08.000Z
|
2020-11-04T18:48:00.000Z
|
import argparse
import glob
import os
import pickle
from pathlib import Path
import numpy as np
from PIL import Image
from tqdm import tqdm
from src.align.align_trans import get_reference_facial_points, warp_and_crop_face
# sys.path.append("../../")
from src.align.detector import detect_faces
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="face alignment")
parser.add_argument(
"-source_root",
"--source_root",
help="specify your source dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-dest_root",
"--dest_root",
help="specify your destination dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-crop_size",
"--crop_size",
help="specify size of aligned faces, align and crop with padding",
default=112,
type=int,
)
args = parser.parse_args()
source_root = args.source_root # specify your source dir
dest_root = args.dest_root # specify your destination dir
crop_size = (
args.crop_size
) # specify size of aligned faces, align and crop with padding
scale = crop_size / 112.0
reference = get_reference_facial_points(default_square=True) * scale
cwd = os.getcwd() # delete '.DS_Store' existed in the source_root
os.chdir(source_root)
os.system("find . -name '*.DS_Store' -type f -delete")
os.chdir(cwd)
imfiles = [
f
for f in glob.glob(f"{source_root}F????/MID*/faces/msceleb*")
if Path(f).is_file()
]
# images = {imfile.replace(source_root, ''): Image.open(imfile) for imfile in imfiles}
meta = {}
# for subfolder in tqdm(os.listdir(source_root)):
for imfile in tqdm(imfiles):
ref = imfile.replace(source_root, "")
print("Processing\t{}".format(imfile))
img = Image.open(imfile)
try: # Handle exception
bbs, landmarks = detect_faces(img)
except Exception:
print("{} is discarded due to exception!".format(imfile))
continue
ref = imfile.replace(source_root, "")
ndetections = len(landmarks)
if (
ndetections == 0
): # If the landmarks cannot be detected, the img will be discarded
print("{} is discarded due to non-detected landmarks!".format(imfile))
meta[ref] = []
continue
li_meta = []
for i in range(ndetections):
im_meta = {}
im_meta["face"] = i
im_meta["landmarks"] = landmarks[i]
im_meta["bb"] = bbs[i]
facial5points = [[landmarks[i][j], landmarks[i][j + 5]] for j in range(5)]
warped_face = warp_and_crop_face(
np.array(img),
facial5points,
reference,
crop_size=(crop_size, crop_size),
)
img_warped = Image.fromarray(warped_face)
image_name = imfile.replace("images", "cropped").replace(
".jpg", "-{:02d}.jpg".format(i)
)
# im_meta['ref'] = "/".join(image_name.split('/')[-5:])
img_warped.save(image_name)
li_meta.append(im_meta)
meta[ref] = li_meta
with open(source_root + "cropped-meta.pkl", "wb") as f:
pickle.dump(meta, f)
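# Small illustrative helper (not called anywhere): shows, with assumed numpy data, how the
# facial5points comprehension above pairs the detector's flat [x1..x5, y1..y5] layout into
# the five (x, y) points that warp_and_crop_face expects.
def _facial5points_sketch():
    pts = np.arange(10)  # pretend landmarks: [x1..x5, y1..y5]
    return [[pts[j], pts[j + 5]] for j in range(5)]  # [[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]]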
| 32.657143
| 90
| 0.585885
| 417
| 3,429
| 4.647482
| 0.326139
| 0.06192
| 0.026316
| 0.024768
| 0.158927
| 0.110423
| 0.110423
| 0.110423
| 0.110423
| 0.110423
| 0
| 0.006163
| 0.290172
| 3,429
| 104
| 91
| 32.971154
| 0.790058
| 0.131234
| 0
| 0.122222
| 0
| 0
| 0.169474
| 0.037062
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76e38e9aaa4e8905b66b235b95aefae36be7dc3f
| 25,699
|
py
|
Python
|
rpg_game/gui.py
|
ricott1/twissh
|
8cbed5eef8e3326a92855cdc2cfea3f4ce214d8d
|
[
"MIT"
] | null | null | null |
rpg_game/gui.py
|
ricott1/twissh
|
8cbed5eef8e3326a92855cdc2cfea3f4ce214d8d
|
[
"MIT"
] | null | null | null |
rpg_game/gui.py
|
ricott1/twissh
|
8cbed5eef8e3326a92855cdc2cfea3f4ce214d8d
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
import urwid
import time, os, copy
from rpg_game.utils import log, mod, distance
from rpg_game.constants import *
from urwid import raw_display
SIZE = lambda scr=raw_display.Screen(): scr.get_cols_rows()
MIN_HEADER_HEIGHT = 3
MAX_MENU_WIDTH = 48
FOOTER_HEIGHT = 4
PALETTE = [
("line", 'black', 'white', "standout"),
("top","white","black"),
("frame","white","white"),
("player", "light green", "black"),
("other", "light blue", "black"),
("monster", "dark red", "black"),
("fatigued", "dark red", "white", "standout"),
("reversed", "standout", ""),
("common","white","black"),
("common_line","black","white","standout"),
("uncommon","dark cyan","black"),
("uncommon_line","dark cyan","white","standout"),
("rare","yellow","black"),
("rare_line","yellow","white","standout"),
("unique","light magenta","black"),
("unique_line","light magenta","white","standout"),
("set","light green","black"),
("set_line","light green","white","standout"),
("normal","white","black"),
("positive","light green","black"),
("negative","dark red","black"),
("white","white","black"),
("disabled","dark red","black"),
("red","dark red","black"),
("green","light green","black"),
("yellow","yellow","black"),
("brown","brown","black"),
("white_line","black","white", "standout"),
("red_line","dark red","white", "standout"),
("green_line","light green","white", "standout"),
("yellow_line","yellow","white", "standout"),
("cyan","light cyan","black"),
("cyan_line","light cyan","white", "standout"),
("name","white","black"),
]
class UiFrame(urwid.Frame):
def __init__(self, parent, mind, *args, **kargs):
self.parent = parent
self.mind = mind
urwid.AttrMap(self,"frame")
super().__init__(*args, **kargs)
@property
def player(self):
if self.mind.avatar.uuid in self.mind.master.players:
return self.mind.master.players[self.mind.avatar.uuid]
else:
return None
@property
def connection(self):
if self.mind.avatar.uuid in self.mind.connections:
return self.mind.connections[self.mind.avatar.uuid]
else:
return None
def handle_input(self, _input):
pass
def on_update(self):
pass
def dispatch_event(self, event_type, *args):
self.mind.get_GUI_event(event_type, *args)
def register_event(self, event_type, callback):
self.mind.register_GUI_event(event_type, callback)
def disconnect(self):
pass
def restart(self):
pass
def focus_next(self):
pass
def focus_previous(self):
pass
def update_body(self, title, no_title=False, boxed=False):
self.active_body = self.bodies[title]
if boxed:
if no_title:
self.contents["body"] = (urwid.LineBox(self.active_body), None)
else:
self.contents["body"] = (urwid.LineBox(self.active_body, title=title), None)
else:
self.contents["body"] = (self.active_body, None)
class GUI(UiFrame):
def __init__(self, parent, mind):
self.bodies = {"Intro" : IntroFrame(self, mind)}
self.active_body = self.bodies["Intro"]
super().__init__(parent, mind, self.active_body)
def on_update(self):
self.active_body.on_update()
def handle_input(self, _input):
# print("HANDLING", _input)
self.active_body.handle_input(_input)
# def exit(self):
# self.disconnect()
# self.mind.disconnect()#should use dispatch event
def restart(self):
self.update_body("Intro", no_title=True)
def start_game_frame(self):
self.bodies["Game"] = GameFrame(self, self.mind)
self.update_body("Game", no_title=True)
class IntroFrame(UiFrame):
def __init__(self, parent, mind):
# urwid.Padding(urwid.BigText(('top', "Hack\'n\'SSH"), urwid.HalfBlock5x4Font())),
self.choices = ("Warrior", "Dwarf", "Wizard", "Thief", "Bard")
self.descriptions = {"Warrior": "The mighty warrior\n\nStrength +1, Hit points +4\nCharge and parry",
"Dwarf": "The short dwarf\n\nStrength +1, Constitution +1, Hit points +6\nDemolish and parry",
"Wizard": "The opportune wizard\n\nIntelligence +1\n Fireball, teleport and ice wall",
"Thief": "The sneaky thief\n\nDexterity +1, Intelligence +1, Hit points +2\nSneak attack, hide and trap",
"Bard": "The noisy bard\n\nCharisma +1, Dexterity +1, Intelligence +1, Hit points +2\nSing and summon"}
line = []
for c in self.choices:
btn = attr_button(c, self.select_class)
line.append(btn)
walker = urwid.SimpleFocusListWalker(line)
urwid.connect_signal(walker, "modified", self.update_description)
self.listbox = SelectableListBox(walker)
header = urwid.LineBox(urwid.BoxAdapter(self.listbox, len(self.choices)+1))
super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions["Warrior"])])), header=header, focus_part="header")
def select_class(self, button):
index = min(self.listbox.focus_position, len(self.choices)-1)
choice = self.choices[index]
self.mind.master.new_player(self.mind.avatar.uuid, choice)
self.parent.start_game_frame()
def update_description(self):
index = min(self.listbox.focus_position, len(self.choices)-1)
choice = self.choices[index]
self.contents["body"] = (urwid.ListBox(urwid.SimpleListWalker([urwid.Text(self.descriptions[choice])])), None)
class GameFrame(UiFrame):
def __init__(self, parent, mind):
self.mind = mind
_header = urwid.LineBox(urwid.BoxAdapter(SelectableListBox(urwid.SimpleFocusListWalker([urwid.Text("")])), self.header_height))
self._menu_view = True
self.map = MapFrame(self, mind)
self.menu = MenuFrame(self, mind)
super().__init__(parent, mind, urwid.Columns([(self.map_width, self.map), (self.menu_width, self.menu)], focus_column=1), header=_header, footer=None, focus_part="body")
self.menu_view = True
self.update_footer()
self.header_widget = self.header.original_widget.box_widget
self.footer_content_size = 0
@property
def header_height(self):
return MIN_HEADER_HEIGHT#max(MIN_HEADER_HEIGHT, self.mind.screen_size[1]//8)
@property
def menu_width(self):
if self.menu_view:
return min(MAX_MENU_WIDTH, (3*self.mind.screen_size[0])//7)
return 0
@property
def map_width(self):
if self.menu_view:
return self.mind.screen_size[0] - self.menu_width
return self.mind.screen_size[0]
@property
def body_width(self):
return self.mind.screen_size[0]
@property
def body_height(self):
return self.mind.screen_size[1] - self.header_height - FOOTER_HEIGHT - 2
@property
def menu_view(self):
return self._menu_view
@menu_view.setter
def menu_view(self, value):
self._menu_view = value
_columns = [(self.map_width, self.map), (self.menu_width, self.menu)]
self.contents["body"] = (urwid.Columns(_columns, focus_column=1), None)
@property
def header_list(self):
return sorted([ent for k, ent in self.player.location.entities.items() if distance(self.player.position, ent.position) <= 3 and ent.status], key=lambda ent: distance(self.player.position, ent.position))
def update_footer(self):
_size = 0
inv_btns = []
for i, obj in self.player.inventory.content.items():
if obj:
_size += 1
if obj.is_equipment and obj.is_equipped:
_marker = ["[", (obj.color, f"{obj.marker[0]}"), "]"]
elif obj.is_equipment and not obj.is_equipped:
_marker = ["]", (obj.color, f"{obj.marker[0]}"), "["]
elif obj.is_consumable:
_marker = ["(", (obj.color, f"{obj.marker[0]}"), ")"]
else:
_marker = [f" {obj.marker[0]} "]
else:
_marker = [f" "]
if i < 9:
_num = f"\n {i+1} "
elif i == 9:
_num = "\n 0 "
elif i == 10:
_num = "\n - "
elif i == 11:
_num = "\n = "
if obj and obj is self.player.inventory.selection:
_marker += [("line", _num)]
else:
_marker += [("top", _num)]
btn = urwid.Text(_marker, align="center")
inv_btns.append((5, urwid.LineBox(btn)))
if self.mind.screen_size != (80, 24):
inv_btns.append(urwid.Text("\nSET TERMINAL\nTO 80X24", align="center"))
self.contents["footer"] = (SelectableColumns(inv_btns, dividechars=0), None)
self.footer_content_size = _size
def on_update(self):
self.update_header()
if self.footer_content_size != len(self.player.inventory.all):
self.update_footer()
if self.mind.screen_size != (80, 24):
self.update_footer()
self.map.on_update()
if self.menu_view:
self.menu.on_update()
def handle_input(self, _input):
if _input == "tab":
self.menu_view = not self.menu_view
elif _input == "enter" and self.player.inventory.selection:
self.player.use_quick_item(self.player.inventory.selection)
self.update_footer()
elif _input == "Q" and self.player.inventory.selection:
self.player.actions["drop"].use(self.player, obj=self.player.inventory.selection)
self.update_footer()
elif _input.isnumeric() or _input in ("-", "="):
self.select_item(_input)
self.update_footer()
elif _input == self.mind.key_map["status-menu"] and self.menu_view:
self.menu.update_body("Status")
elif _input == self.mind.key_map["help-menu"] and self.menu_view:
self.menu.update_body("Help")
elif _input == self.mind.key_map["equipment-menu"] and self.menu_view:
self.menu.update_body("Equipment")
elif _input == self.mind.key_map["inventory-menu"] and self.menu_view:
self.menu.update_body("Inventory")
else:
self.map.handle_input(_input)
def select_item(self, _input):
if _input.isnumeric() and int(_input) > 0:
_input = int(_input)-1
elif _input == "0":
_input = 9
elif _input == "-":
_input = 10
elif _input == "=":
_input = 11
self.player.inventory.selection = self.player.inventory.get(_input)
def update_header(self):
widgets = []
for p in self.header_list:
widgets.append(urwid.AttrMap(urwid.AttrMap(urwid.Text(p.status, wrap="clip"), {self.player.id:"player"}), {p.id:"other" for i, p in self.mind.master.players.items()}))
if widgets:
self.header_widget.body[:] = widgets
class MapFrame(UiFrame):
def __init__(self, parent, mind):
map_box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
self.map_box = map_box.body
self.layer_view = -1
self.debug_view = False
super().__init__(parent, mind, map_box)
self.on_update()
@property
def visible_range(self):
header_height = self.parent.header_height + 2
tot_rows = self.mind.screen_size[1]
return (tot_rows - header_height - FOOTER_HEIGHT)
def on_update(self):
if self.layer_view == -1:
_map = copy.deepcopy(self.player.location.map)
else:
_map = self.player.location.layer_from_entities(self.layer_view, self.debug_view)
x, y, z = self.player.position
w = max(0, y - self.parent.body_width//3)
visible_map = [line[w:w+self.parent.body_width] for line in _map]
h = max(0, x - self.parent.body_height//2)
if h+self.parent.body_height >= len(visible_map):
visible_map = visible_map[len(visible_map)-self.parent.body_height:]
else:
visible_map = visible_map[h:h+self.parent.body_height]
map_with_attr = [urwid.AttrMap(urwid.AttrMap(urwid.Text(line, wrap="clip"), {self.player.id:"player"}), {p.id:"other" for i, p in self.mind.master.players.items()}) for line in visible_map]
self.map_box[:] = map_with_attr
def handle_input(self, _input):
if _input == "ctrl f":
self.debug_view = not self.debug_view
elif _input == "ctrl v":
self.layer_view = self.layer_view + 1
if self.layer_view > 2:
self.layer_view = -1
elif _input in self.mind.key_map:
_action = self.mind.key_map[_input]
self.player.handle_input(_action)
class MenuFrame(UiFrame):
def __init__(self, parent, mind):
_frames = ("Inventory", "Status", "Equipment", "Help")
self.bodies = {b : globals()[f"{b}Frame"](self, mind) for b in _frames}
idx = -1
_title = _frames[idx]
self.active_body = self.bodies[_title]
super().__init__(parent, mind, urwid.LineBox(self.active_body, title=_title))
def on_update(self):
self.active_body.on_update()
def selectable(self):
return False
def update_body(self, _title):
self.active_body = self.bodies[_title]
self.contents["body"] = (urwid.LineBox(self.active_body, title=_title), None)
class InventoryFrame(UiFrame):
def __init__(self, parent, mind):
columns = urwid.Columns([urwid.Text("")])
box = urwid.ListBox(urwid.SimpleListWalker([columns]))
self.box = box.body
self.default_header = urwid.Text("0/9-= to select\n\n", align="center")
self.default_footer = urwid.Text([("green", f"{'Enter:use/eqp':<14s}"), ("yellow", "Q:drop")], align="center")
super().__init__(parent, mind, box, header=self.default_header, footer=self.default_footer)
@property
def selection_data(self):
if not self.player.inventory.selection:
return urwid.Text("")
i = self.player.inventory.selection
_text = []
_text += [i.eq_description, f"\nEncumbrance:{i.encumbrance}\n"]
return urwid.Text(_text)
def update_header(self):
if not self.player.inventory.selection:
self.contents["header"] = (self.default_header, None)
else:
i = self.player.inventory.selection
self.contents["header"] = (urwid.Text([(i.color, f"{i.name}\n"), f"{i.description}\n"], align="center"), None)
def update_footer(self):
if not self.player.inventory.selection:
self.contents["footer"] = (self.default_footer, None)
else:
i = self.player.inventory.selection
_text = []
if not i.requisites(self.player):
_text += [("red", f"{'Cannot equip':<14s}")]
elif not i.is_equipped:
_text += [("green", f"{'Enter:equip':<14s}")]
elif i.is_equipped:
_text += [("green", f"{'Enter:unequip':<14s}")]
elif i.is_consumable:
_text += [("green", f"{'Enter:use':<14s}")]
_text += [("yellow", "Q:drop")]
self.contents["footer"] = (urwid.Text(_text, align="center"), None)
def update_body(self):
side = urwid.Text("║")
width = 8
height = 6
_marker_box = ["╔" +"═"*width+"╗\n"]
for x in range(height):
_marker_box += ["║"]
for y in range(width):
_marker_box += ["."]
_marker_box += ["║\n"]
_marker_box += ["╚" +"═"*width+"╝"]
if self.player.inventory.selection:
i = self.player.inventory.selection
X_OFFSET = 2
Y_OFFSET = 4
for m, pos in zip(i.in_inventory_markers, i.in_inventory_marker_positions):
x, y = pos
_marker_box[(x+X_OFFSET)*(width+2)+y+Y_OFFSET] = (i.color, m)
self.box[:] = [urwid.Columns([(width+2, urwid.Text(_marker_box)), self.selection_data], dividechars=1)]
def on_update(self):
self.update_header()
self.update_body()
self.update_footer()
class StatusFrame(UiFrame):
def __init__(self, parent, mind):
box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
self.box = box.body
super().__init__(parent, mind, box)
def on_update(self):
player = self.player
x, y, z = player.position
_top = f"{player.name:<12s} {player.game_class.name:<10s}\nLev:{player.level:<2d} Exp:{player.exp:<4d} {player.location.name}@({x},{y})\n"
_left = []
for s in CHARACTERISTICS:
c = getattr(player, s)
state = ["normal", "positive", "negative"][-int(c.temp_bonus < 0) + int(c.temp_bonus > 0)]
if self.parent.parent.menu_width > 40:
_name = c.name[0].upper() + c.name[1:]
_left += [f"{_name:<12} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
elif self.parent.parent.menu_width > 36:
_name = c.name[0].upper() + c.name[1:6]
_left += [f"{_name:<6} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
else:
_left += [f"{s:<3} ", (state, f"{c.value:>2d}"), f" ({c.mod:<+2d})\n"]
_right = []
base = player.STR.mod
weapon = player.equipment["main_hand"]
if not weapon:
min_dmg, max_dmg = (1, 4)
else:
number, value = weapon.dmg
min_dmg, max_dmg = (number * 1, number * value)
min_dmg = max(1, base + min_dmg)
max_dmg = max(1, base + max_dmg)
_right.append(f"Damage {min_dmg:>3d}-{max_dmg:<3d}\n")
_right.append(f"Reduction {player.dmg_reduction:<3d}\n")
_right.append(f"Encumb ")
if player.inventory.encumbrance == EXTRA_ENCUMBRANCE_MULTI*player.encumbrance:
_right.append(("red", f"{player.inventory.encumbrance:>2d}"))
elif player.inventory.encumbrance > player.encumbrance:
_right.append(("yellow", f"{player.inventory.encumbrance:>2d}"))
else:
_right.append(("white", f"{player.inventory.encumbrance:>2d}"))
_right.append(f"/{player.encumbrance:<2d}\n")
_right.append(f"Speed {player.movement_speed}\n")
_right.append(f"Monsterized {player.MP:<2d}\n")
self.box[:] = [urwid.Text(_top), urwid.Columns([urwid.Text(_left), urwid.Text(_right)], dividechars = 1) ]
class EquipmentFrame(UiFrame):
def __init__(self, parent, mind):
box = urwid.ListBox(urwid.SimpleListWalker([urwid.Text("")]))
self.box = box.body
super().__init__(parent, mind, box)
def on_update(self):
player = self.player
_equipment = []
for t, obj in player.equipment.items():
_name = t.replace("_", " ")
_name = _name[0].upper() + _name[1:]
if obj:
_equipment += [urwid.Text([f"{_name}: ", (obj.color, f"{obj.name}")])]
else:
_equipment += [urwid.Text([f"{_name}: "])]
_bonus = {}
for eqp in player.equipment_set:
for b in set(list(eqp.bonus.keys()) + list(eqp.set_bonus.keys())):
val = player.full_eqp_bonus(eqp, b)
if b not in _bonus:
_bonus[b] = val
else:
_bonus[b] += val
_top = ""
for b, val in _bonus.items():
if b == "dmg_reduction":
_top += f"Reduction:{val} "
else:
_top += f"{b}:{val} "
_top += "\n"
self.box[:] = [urwid.Text(_top)] + _equipment
class HelpFrame(UiFrame):
def __init__(self, parent, mind):
self.mind = mind
map_commands = ["Map commands\n\n", f"←→↑↓:move\n", f"shift+←→↑↓:dash\n", f"a:attack\n", f"q:pickup\n"]
class_action_keys = [k for k, act in self.mind.key_map.items() if act.startswith("class_ability")]
for i, act in enumerate(self.player.class_actions):
k = class_action_keys[i]
map_commands.append(f"{k}:{self.player.class_actions[act].description.lower()}\n")
menu_commands = ["Menu commands\n\n", f"tab:open/close\n",f"0/9-=:select item\n", f"ctrl+p:respawn\n", f"ctrl+a:inventory\n", f"ctrl+s:status\n", f"ctrl+d:help\n", f"ctrl+e:equipment\n"]
columns = urwid.Columns([urwid.Text(map_commands, wrap="clip"), urwid.Text(menu_commands, wrap="clip")], dividechars = 1)
super().__init__(parent, mind, urwid.ListBox(urwid.SimpleListWalker([columns])))
class SelectableListBox(urwid.ListBox):
def __init__(self, body):
super(SelectableListBox, self).__init__(body)
def focus_next(self):
try:
self.focus_position += 1
except IndexError:
pass
def focus_previous(self):
try:
self.focus_position -= 1
except IndexError:
pass
class SelectableColumns(urwid.Columns):
def __init__(self, widget_list, focus_column=None, dividechars=0):
super().__init__(widget_list, dividechars, focus_column)
def focus_next(self):
try:
self.focus_position += 1
except IndexError:
pass
def focus_previous(self):
try:
self.focus_position -= 1
except IndexError:
pass
class FrameColumns(urwid.Columns):
def __init__(self, parent, widget_list, dividechars=0):
self.widget_size = len(widget_list)
super(FrameColumns, self).__init__(widget_list, dividechars)
self.parent = parent
def focus_next(self):
try:
self.focus_position += 1
if self.focus_position >= self.widget_size:
self.focus_position -= self.widget_size
new_body = [b for b in self.parent.bodies][self.focus_position]
self.parent.update_body(new_body)
except Exception:
pass
def focus_previous(self):
try:
self.focus_position -= 1
if self.focus_position < 0:
self.focus_position += self.widget_size
new_body = [b for b in self.parent.bodies][self.focus_position]
self.parent.update_body(new_body)
except Exception:
pass
class ButtonLabel(urwid.SelectableIcon):
def set_text(self, label):
'''
set_text is invoked by Button.set_label
'''
self.__super.set_text(label)
self._cursor_position = len(label) + 1
class MyButton(urwid.Button):
'''
- override __init__ to use our ButtonLabel instead of urwid.SelectableIcon
- make button_left and button_right plain strings and variable width -
any string, including an empty string, can be set and displayed
- otherwise, we leave Button behaviour unchanged
'''
button_left = "["
button_right = "]"
def __init__(self, label, on_press=None, user_data=None, borders=True, disabled=False):
self._label = ButtonLabel("")
if borders:
cols = urwid.Columns([
('fixed', len(self.button_left), urwid.Text(self.button_left)),
self._label,
('fixed', len(self.button_right), urwid.Text(self.button_right))],
dividechars=1)
else:
cols = urwid.Columns([self._label],
dividechars=0)
super(urwid.Button, self).__init__(cols)
self.disabled = disabled
if on_press:
urwid.connect_signal(self, 'click', on_press, user_data)
self.set_label(label)
self.label_text = label
# @property
# def disabled(self):
# return self._disabled
# @disabled.setter
# def disabled(self, value):
# if self._disabled == value:
# return
# if self.disabled:
# urwid.AttrMap(self, "disabled")
# else:
# urwid.AttrMap(self, None, "line")
def selectable(self):
return not self.disabled
def attr_button(label, cmd=None, attr_map=None, focus_map = "line", align = "center", user_args = None, borders=True, disabled=False):
btn = create_button(label, cmd=cmd, align = align, user_args = user_args, borders=borders, disabled=disabled)
return urwid.AttrMap(btn, attr_map, focus_map=focus_map)
def create_button(label, cmd=None, align = "center", user_args = None, borders=True, disabled=False):
btn = MyButton(label, borders=borders, disabled=disabled)
btn._label.align = align
if cmd:
if user_args:
urwid.connect_signal(btn, "click", cmd, user_args = user_args)
else:
urwid.connect_signal(btn, "click", cmd)
return btn
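# Hedged sketch (kept out of the UI flow; nothing calls it) of the plain-urwid pattern that
# attr_button/create_button wrap: a Button wired to a click callback, then an AttrMap using
# the "line" focus attribute from PALETTE above. Only documented urwid calls are used.
def _button_sketch():
    def on_click(button):
        print("clicked:", button.get_label())
    btn = urwid.Button("Start")
    urwid.connect_signal(btn, "click", on_click)
    return urwid.AttrMap(btn, None, focus_map="line")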
| 37.90413
| 210
| 0.575742
| 3,124
| 25,699
| 4.542574
| 0.131242
| 0.021986
| 0.022761
| 0.027623
| 0.360087
| 0.282292
| 0.232119
| 0.181805
| 0.169262
| 0.114721
| 0
| 0.008818
| 0.28075
| 25,699
| 677
| 211
| 37.960118
| 0.758007
| 0.032219
| 0
| 0.288104
| 0
| 0.007435
| 0.115273
| 0.018265
| 0
| 0
| 0
| 0
| 0
| 1
| 0.124535
| false
| 0.022305
| 0.009294
| 0.013011
| 0.202602
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76e3aa393f7a0908df3e197db3a2c2ed201ee19d
| 4,851
|
py
|
Python
|
lale/lib/autogen/linear_regression.py
|
gbdrt/lale
|
291f824a6b96f088e787979ca768f50d7758424e
|
[
"Apache-2.0"
] | null | null | null |
lale/lib/autogen/linear_regression.py
|
gbdrt/lale
|
291f824a6b96f088e787979ca768f50d7758424e
|
[
"Apache-2.0"
] | null | null | null |
lale/lib/autogen/linear_regression.py
|
gbdrt/lale
|
291f824a6b96f088e787979ca768f50d7758424e
|
[
"Apache-2.0"
] | null | null | null |
from numpy import inf, nan
from sklearn.linear_model import LinearRegression as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class LinearRegressionImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for LinearRegression Ordinary least squares Linear Regression.",
"allOf": [
{
"type": "object",
"required": ["fit_intercept", "normalize", "copy_X", "n_jobs"],
"relevantToOptimizer": ["fit_intercept", "normalize", "copy_X"],
"additionalProperties": False,
"properties": {
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, X will be copied; else, it may be overwritten.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of jobs to use for the computation",
},
},
},
{
"XXX TODO XXX": "Parameter: n_jobs > only provide speedup for n_targets > 1 and sufficient large problems"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Training data",
},
"y": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Target values",
},
"sample_weight": {
"type": "array",
"items": {"type": "number"},
"description": "Individual weights for each sample ",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
set_docstrings(LinearRegressionImpl, _combined_schemas)
LinearRegression = make_operator(LinearRegressionImpl, _combined_schemas)
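# Hedged usage sketch with a tiny assumed dataset; it exercises LinearRegressionImpl defined
# above the way lale would (construct with hyperparams, then fit/predict). Guarded so it
# never runs on import.
if __name__ == "__main__":
    import numpy as np

    X = np.array([[0.0], [1.0], [2.0]])
    y = np.array([1.0, 3.0, 5.0])
    model = LinearRegressionImpl(fit_intercept=True)
    print(model.fit(X, y).predict(X))  # approximately [1., 3., 5.]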
| 35.408759
| 151
| 0.508555
| 438
| 4,851
| 5.468037
| 0.317352
| 0.037578
| 0.058455
| 0.060125
| 0.386221
| 0.31023
| 0.270564
| 0.270564
| 0.270564
| 0.23048
| 0
| 0.004677
| 0.338899
| 4,851
| 136
| 152
| 35.669118
| 0.742127
| 0
| 0
| 0.302326
| 0
| 0.007752
| 0.40301
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.03876
| 0.007752
| 0.085271
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76e62dfaead6e340b719c28d88044ea601c31718
| 1,309
|
py
|
Python
|
setup.py
|
awesome-archive/webspider
|
072e9944db8fe05cbb47f8ea6d1a327c2a8929b1
|
[
"MIT"
] | null | null | null |
setup.py
|
awesome-archive/webspider
|
072e9944db8fe05cbb47f8ea6d1a327c2a8929b1
|
[
"MIT"
] | null | null | null |
setup.py
|
awesome-archive/webspider
|
072e9944db8fe05cbb47f8ea6d1a327c2a8929b1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import find_packages, setup
from app import __version__
# get the dependencies and installs
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'requirements.txt')) as f:
all_requirements = f.read().split('\n')
setup(
name='webspider',
version=__version__,
license='MIT',
author='heguozhu',
author_email='[email protected]',
description='lagou.com spider',
url='[email protected]:GuozhuHe/webspider.git',
packages=find_packages(exclude=['tests']),
package_data={'webspider': ['README.md']},
zip_safe=False,
install_requires=all_requirements,
entry_points={
'console_scripts': [
'web = app.web_app:main',
'production_web = app.quickly_cmd:run_web_app_by_gunicorn',
'crawl_lagou_data = app.tasks:crawl_lagou_data',
'crawl_jobs_count = app.tasks.jobs_count:crawl_lagou_jobs_count',
'celery_jobs_count_worker = app.quickly_cmd:run_celery_jobs_count_worker',
'celery_lagou_data_worker = app.quickly_cmd:run_celery_lagou_data_worker',
'celery_beat = app.quickly_cmd:run_celery_beat',
'celery_flower = app.quickly_cmd.py:run_celery_flower',
],
}
)
| 34.447368
| 86
| 0.6822
| 169
| 1,309
| 4.911243
| 0.502959
| 0.060241
| 0.078313
| 0.077108
| 0.093976
| 0.06747
| 0
| 0
| 0
| 0
| 0
| 0.000944
| 0.190985
| 1,309
| 37
| 87
| 35.378378
| 0.782814
| 0.05806
| 0
| 0
| 0
| 0
| 0.464228
| 0.282927
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.096774
| 0
| 0.096774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76e72292730408078c92e31d3a0592b902469f3c
| 6,038
|
py
|
Python
|
Doc/conf.py
|
python-doc-tw/cpython-tw
|
9b83e9ffbdd2f3fc56de8dcdc8c4651feeb5a281
|
[
"PSF-2.0"
] | null | null | null |
Doc/conf.py
|
python-doc-tw/cpython-tw
|
9b83e9ffbdd2f3fc56de8dcdc8c4651feeb5a281
|
[
"PSF-2.0"
] | null | null | null |
Doc/conf.py
|
python-doc-tw/cpython-tw
|
9b83e9ffbdd2f3fc56de8dcdc8c4651feeb5a281
|
[
"PSF-2.0"
] | null | null | null |
#
# Python documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
import sys, os, time
sys.path.append(os.path.abspath('tools/extensions'))
# General configuration
# ---------------------
extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest',
'pyspecific', 'c_annotations']
# General substitutions.
project = 'Python'
copyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')
# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
import patchlevel
version, release = patchlevel.get_version_info()
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# By default, highlight as Python 3.
highlight_language = 'python3'
# Require Sphinx 1.2 for build.
needs_sphinx = '1.2'
# Ignore any .rst files in the venv/ directory.
exclude_patterns = ['venv/*']
# Options for HTML output
# -----------------------
# Use our custom theme.
html_theme = 'pydoctheme'
html_theme_path = ['tools']
html_theme_options = {'collapsiblesidebar': True}
# Short title used e.g. for <title> HTML tags.
html_short_title = '%s Documentation' % release
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Path to find HTML templates.
templates_path = ['tools/templates']
# Custom sidebar templates, filenames relative to this file.
html_sidebars = {
'index': 'indexsidebar.html',
}
# Additional templates that should be rendered to pages.
html_additional_pages = {
'download': 'download.html',
'index': 'indexcontent.html',
}
# Output an OpenSearch description file.
html_use_opensearch = 'https://docs.python.org/' + version
# Additional static files.
html_static_path = ['tools/static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'python' + release.replace('.', '')
# Split the index
html_split_index = True
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = r'Guido van Rossum\\and the Python development team'
latex_documents = [
('c-api/index', 'c-api.tex',
'The Python/C API', _stdauthor, 'manual'),
('distributing/index', 'distributing.tex',
'Distributing Python Modules', _stdauthor, 'manual'),
('extending/index', 'extending.tex',
'Extending and Embedding Python', _stdauthor, 'manual'),
('installing/index', 'installing.tex',
'Installing Python Modules', _stdauthor, 'manual'),
('library/index', 'library.tex',
'The Python Library Reference', _stdauthor, 'manual'),
('reference/index', 'reference.tex',
'The Python Language Reference', _stdauthor, 'manual'),
('tutorial/index', 'tutorial.tex',
'Python Tutorial', _stdauthor, 'manual'),
('using/index', 'using.tex',
'Python Setup and Usage', _stdauthor, 'manual'),
('faq/index', 'faq.tex',
'Python Frequently Asked Questions', _stdauthor, 'manual'),
('whatsnew/' + version, 'whatsnew.tex',
'What\'s New in Python', 'A. M. Kuchling', 'howto'),
]
# Collect all HOWTOs individually
latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex',
'', _stdauthor, 'howto')
for fn in os.listdir('howto')
if fn.endswith('.rst') and fn != 'index.rst')
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\authoraddress{
\strong{Python Software Foundation}\\
Email: \email{[email protected]}
}
\let\Verbatim=\OriginalVerbatim
\let\endVerbatim=\endOriginalVerbatim
'''
# Documents to append as an appendix to all manuals.
latex_appendices = ['glossary', 'about', 'license', 'copyright']
# Get LaTeX to handle Unicode correctly
latex_elements = {'inputenc': r'\usepackage[utf8x]{inputenc}', 'utf8extra': ''}
# Options for Epub output
# -----------------------
epub_author = 'Python Documentation Authors'
epub_publisher = 'Python Software Foundation'
# Options for the coverage checker
# --------------------------------
# The coverage checker will ignore all modules/functions/classes whose names
# match any of the following regexes (using re.match).
coverage_ignore_modules = [
r'[T|t][k|K]',
r'Tix',
r'distutils.*',
]
coverage_ignore_functions = [
'test($|_)',
]
coverage_ignore_classes = [
]
# Glob patterns for C source files for C API coverage, relative to this directory.
coverage_c_path = [
'../Include/*.h',
]
# Regexes to find C items in the source files.
coverage_c_regexes = {
'cfunction': (r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'),
'data': (r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)'),
'macro': (r'^#define ([^_][\w_]+)\(.*\)[\s|\\]'),
}
# The coverage checker will ignore all C items whose names match these regexes
# (using re.match) -- the keys must be the same as in coverage_c_regexes.
coverage_ignore_c_items = {
# 'cfunction': [...]
}
# Options for the link checker
# ----------------------------
# Ignore certain URLs.
linkcheck_ignore = [r'https://bugs.python.org/(issue)?\d+',
# Ignore PEPs for now, they all have permanent redirects.
r'http://www.python.org/dev/peps/pep-\d+']
# Options for extensions
# ----------------------
# Relative filename of the reference count data file.
refcount_file = 'data/refcounts.dat'
# Translation
# -----------
gettext_compact = False
locale_dirs = ["locale"]
| 29.598039
| 82
| 0.661809
| 757
| 6,038
| 5.174373
| 0.410832
| 0.034465
| 0.018381
| 0.003064
| 0.015828
| 0.015828
| 0
| 0
| 0
| 0
| 0
| 0.004799
| 0.171746
| 6,038
| 203
| 83
| 29.743842
| 0.778444
| 0.406757
| 0
| 0
| 0
| 0
| 0.427273
| 0.059091
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.020833
| 0
| 0.020833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76e8aa5b3dcd6d5941acd4ac1423725bbe5688e5
| 2,178
|
py
|
Python
|
basic_stats.py/basic_stats.py
|
RahmB/basic_stats
|
b286fc84faa6dab17aa8d1e04d85fbf29a41ee12
|
[
"MIT"
] | null | null | null |
basic_stats.py/basic_stats.py
|
RahmB/basic_stats
|
b286fc84faa6dab17aa8d1e04d85fbf29a41ee12
|
[
"MIT"
] | null | null | null |
basic_stats.py/basic_stats.py
|
RahmB/basic_stats
|
b286fc84faa6dab17aa8d1e04d85fbf29a41ee12
|
[
"MIT"
] | null | null | null |
# Import the matplotlib module here. No other modules should be used.
# Import plotting library
import matplotlib.pyplot as plt
#import....
from os import *
# Import Numpy
import numpy as np
def mean(my_list): # This is the definition in the head.
i = 0
my_sum = 0
for number in my_list:
my_sum = my_sum + my_list[i]
i+=1
mu = my_sum / i
print('mean = ' + str(mu))
return mu
def sd(my_list):
j = 0
sigma = 0
my_sumsd = 0
mu = mean(my_list)
for number in my_list:
my_sumsd = my_sumsd + (my_list[j] - mu)**2
j +=1
sigma = (my_sumsd/j)**(.5)
print('standard deviation = ' + str(sigma))
return sigma
def norm(my_list):
k = 0
l = 0
mu = mean(my_list)
sigma = sd(my_list)
for number in my_list:
if abs(my_list[l] - mu) < sigma:
k += 1
l += 1
else:
l += 1
dist = k / l
return dist
def is_norm(my_list):
dist = norm(my_list)
if 0.66 < dist < 0.70:
print('Data is normally distributed')
return True
else:
print('Data is not normally distributed')
return False
def is_skew(my_list):
m = 0
skew = 0
sumsk = 0
mu = mean(my_list)
sigma = sd(my_list)
for numbers in my_list:
sumsk = (my_list[m] - mu)**3 + sumsk
m +=1
skew = sumsk /(len(my_list)*sigma**3)
print('skewness = ' + str(skew))
if skew == 0:
print('skewness = 0, therefore sample is normally distributed')
else:
print('skewness =/= 0, therefore sample is not normally distributed')
def graph(my_list):
plt.hist(my_list,density=True, facecolor='b')
sigma = sd(my_list) #stores standard deviation
mu = mean(my_list) #stores mean
plt.title('my_list Histogram')
plt.xlabel('Number')
plt.ylabel('Probability')
plt.xlim(mu - 4*sigma, mu + 4*sigma)
plt.grid(True)
plt.show()
def stats(my_list):
mu = mean(my_list)
std = sd(my_list)
dist = norm(my_list)
graph(my_list)
is_norm(my_list)
is_skew(my_list)
return (mu, std, dist)
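# Small usage sketch with an assumed sample; cross-checks mean/sd against numpy. graph() and
# stats() open a matplotlib window, so only the pure functions are exercised here.
if __name__ == "__main__":
    sample = [2, 4, 4, 4, 5, 5, 7, 9]
    print(mean(sample), np.mean(sample))  # both 5.0
    print(sd(sample), np.std(sample))     # both 2.0 (population standard deviation)
    print(norm(sample))                   # fraction of values within one sigma of the mean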
| 22
| 77
| 0.565657
| 328
| 2,178
| 3.621951
| 0.268293
| 0.161616
| 0.050505
| 0.050505
| 0.189394
| 0.183502
| 0.079966
| 0.048822
| 0.048822
| 0.048822
| 0
| 0.020889
| 0.318641
| 2,178
| 99
| 78
| 22
| 0.77965
| 0.085859
| 0
| 0.233766
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.038961
| 0
| 0.207792
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76e91ea24b8b713b4825e3c31ae941d3409f7123
| 4,987
|
py
|
Python
|
src/catkin_pkg/cli/tag_changelog.py
|
delftrobotics-forks/catkin_pkg
|
122eae0971f13a6080b72af6bb0eb52656c00bea
|
[
"BSD-3-Clause"
] | 2
|
2018-12-11T16:35:20.000Z
|
2019-01-23T16:42:17.000Z
|
usr/lib/python2.7/dist-packages/catkin_pkg/cli/tag_changelog.py
|
Roboy/roboy_managing_node_fpga
|
64ffe5aec2f2c98a051bb1a881849c195b8d052c
|
[
"BSD-3-Clause"
] | 1
|
2020-08-25T11:24:44.000Z
|
2020-09-22T14:01:26.000Z
|
src/catkin_pkg/cli/tag_changelog.py
|
plusone-robotics/catkin_pkg
|
9d68332b97db07f77a8b56bb5afaf89ec2536dfa
|
[
"BSD-3-Clause"
] | 4
|
2019-04-30T23:34:51.000Z
|
2021-07-04T07:55:34.000Z
|
"""This script renames the forthcoming section in changelog files with the upcoming version and the current date"""
from __future__ import print_function
import argparse
import datetime
import docutils.core
import os
import re
import sys
from catkin_pkg.changelog import CHANGELOG_FILENAME, get_changelog_from_path
from catkin_pkg.changelog_generator import FORTHCOMING_LABEL
from catkin_pkg.package_version import bump_version
from catkin_pkg.packages import find_packages, verify_equal_package_versions
def get_forthcoming_label(rst):
document = docutils.core.publish_doctree(rst)
forthcoming_label = None
for child in document.children:
title = None
if isinstance(child, docutils.nodes.subtitle):
title = child
elif isinstance(child, docutils.nodes.section):
section = child
if len(section.children) > 0 and isinstance(section.children[0], docutils.nodes.title):
title = section.children[0]
if title and len(title.children) > 0 and isinstance(title.children[0], docutils.nodes.Text):
title_text = title.children[0].rawsource
if FORTHCOMING_LABEL.lower() in title_text.lower():
if forthcoming_label:
raise RuntimeError('Found multiple forthcoming sections')
forthcoming_label = title_text
return forthcoming_label
def rename_section(data, old_label, new_label):
valid_section_characters = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
def replace_section(match):
section_char = match.group(2)[0]
return new_label + '\n' + section_char * len(new_label)
pattern = '^(' + re.escape(old_label) + ')\n([' + re.escape(valid_section_characters) + ']+)$'
data, count = re.subn(pattern, replace_section, data, flags=re.MULTILINE)
if count == 0:
raise RuntimeError('Could not find section')
if count > 1:
raise RuntimeError('Found multiple matching sections')
return data
def main(sysargs=None):
parser = argparse.ArgumentParser(description='Tag the forthcoming section in the changelog files with an upcoming version number')
parser.add_argument('--bump', choices=('major', 'minor', 'patch'), default='patch', help='Which part of the version number to bump? (default: %(default)s)')
args = parser.parse_args(sysargs)
base_path = '.'
# find packages
packages = find_packages(base_path)
if not packages:
raise RuntimeError('No packages found')
print('Found packages: %s' % ', '.join([p.name for p in packages.values()]))
# fetch current version and verify that all packages have same version number
old_version = verify_equal_package_versions(packages.values())
new_version = bump_version(old_version, args.bump)
print('Tag version %s' % new_version)
# check for changelog entries
changelogs = []
missing_forthcoming = []
already_tagged = []
for pkg_path, package in packages.items():
changelog_path = os.path.join(base_path, pkg_path, CHANGELOG_FILENAME)
if not os.path.exists(changelog_path):
missing_forthcoming.append(package.name)
continue
changelog = get_changelog_from_path(changelog_path, package.name)
if not changelog:
missing_forthcoming.append(package.name)
continue
# check that forthcoming section exists
forthcoming_label = get_forthcoming_label(changelog.rst)
if not forthcoming_label:
missing_forthcoming.append(package.name)
continue
# check that new_version section does not exist yet
try:
changelog.get_content_of_version(new_version)
already_tagged.append(package.name)
continue
except KeyError:
pass
changelogs.append((package.name, changelog_path, changelog, forthcoming_label))
if missing_forthcoming:
print('The following packages do not have a forthcoming section in their changelog file: %s' % ', '.join(sorted(missing_forthcoming)), file=sys.stderr)
if already_tagged:
print("The following packages do already have a section '%s' in their changelog file: %s" % (new_version, ', '.join(sorted(already_tagged))), file=sys.stderr)
# rename forthcoming sections to new_version including current date
new_changelog_data = []
new_label = '%s (%s)' % (new_version, datetime.date.today().isoformat())
for (pkg_name, changelog_path, changelog, forthcoming_label) in changelogs:
print("Renaming section '%s' to '%s' in package '%s'..." % (forthcoming_label, new_label, pkg_name))
data = rename_section(changelog.rst, forthcoming_label, new_label)
new_changelog_data.append((changelog_path, data))
print('Writing updated changelog files...')
for (changelog_path, data) in new_changelog_data:
with open(changelog_path, 'wb') as f:
f.write(data.encode('utf-8'))
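# Illustrative helper (not wired into main) showing what rename_section above does to a toy
# two-line reStructuredText header; the sample text is an assumption for demonstration only.
def _rename_section_sketch():
    rst = 'Forthcoming\n-----------\n* fixed a bug\n'
    return rename_section(rst, 'Forthcoming', '1.2.3 (2024-01-01)')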
| 43.745614
| 166
| 0.687989
| 611
| 4,987
| 5.430442
| 0.265139
| 0.067511
| 0.025618
| 0.030139
| 0.098553
| 0.06962
| 0.031344
| 0.031344
| 0
| 0
| 0
| 0.002802
| 0.212753
| 4,987
| 113
| 167
| 44.132743
| 0.842333
| 0.076599
| 0
| 0.077778
| 0
| 0
| 0.13017
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0.011111
| 0.122222
| 0
| 0.2
| 0.077778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76eaa983d4b2d01d9a4e9daae5b69684ff9a0e05
| 1,199
|
py
|
Python
|
tests/optims/distributed_adamw_test.py
|
AswinRetnakumar/Machina
|
6519935ca4553192ac99fc1c7c1e7cab9dd72693
|
[
"MIT"
] | 302
|
2019-03-13T10:21:29.000Z
|
2022-03-25T10:01:46.000Z
|
tests/optims/distributed_adamw_test.py
|
AswinRetnakumar/Machina
|
6519935ca4553192ac99fc1c7c1e7cab9dd72693
|
[
"MIT"
] | 50
|
2019-03-13T09:45:00.000Z
|
2021-12-23T18:32:00.000Z
|
tests/optims/distributed_adamw_test.py
|
AswinRetnakumar/Machina
|
6519935ca4553192ac99fc1c7c1e7cab9dd72693
|
[
"MIT"
] | 55
|
2019-03-17T01:59:57.000Z
|
2022-03-28T01:13:40.000Z
|
import os
import unittest
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
import torch.nn as nn
from machina.optims import DistributedAdamW
def init_processes(rank, world_size,
function, backend='tcp'):
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank,
world_size=world_size)
function(rank, world_size)
class TestDistributedAdamW(unittest.TestCase):
def test_step(self):
def _run(rank, world_size):
model = nn.Linear(10, 1)
optimizer = DistributedAdamW(
model.parameters())
optimizer.zero_grad()
loss = model(torch.ones(10).float())
loss.backward()
optimizer.step()
processes = []
world_size = 4
for rank in range(world_size):
p = Process(target=init_processes,
args=(rank,
world_size,
_run))
p.start()
processes.append(p)
for p in processes:
p.join()
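# Hedged note in code form: the 'tcp' backend used above has been removed from recent PyTorch
# releases, so the same wiring with the 'gloo' backend (an assumption about the target
# environment) is sketched here; the test itself does not invoke this helper.
def _gloo_single_process_sketch():
    os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
    os.environ.setdefault('MASTER_PORT', '29501')
    dist.init_process_group('gloo', rank=0, world_size=1)
    dist.all_reduce(torch.zeros(1))  # trivial with one process, but exercises the group
    dist.destroy_process_group()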
| 26.065217
| 50
| 0.559633
| 129
| 1,199
| 5.062016
| 0.465116
| 0.11026
| 0.099541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021795
| 0.349458
| 1,199
| 45
| 51
| 26.644444
| 0.815385
| 0
| 0
| 0
| 0
| 0
| 0.032527
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.2
| 0
| 0.314286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76ebc2ee4ceeeeacb1f5e2ff455580aa77112974
| 6,352
|
py
|
Python
|
Multi-Task-Learning-PyTorch-master/losses/loss_functions.py
|
nikola3794/edge-evaluation-PASCAL-MT-tmp
|
d3bc7164608a20eb6351c1d41219213927ae6239
|
[
"MIT"
] | null | null | null |
Multi-Task-Learning-PyTorch-master/losses/loss_functions.py
|
nikola3794/edge-evaluation-PASCAL-MT-tmp
|
d3bc7164608a20eb6351c1d41219213927ae6239
|
[
"MIT"
] | null | null | null |
Multi-Task-Learning-PyTorch-master/losses/loss_functions.py
|
nikola3794/edge-evaluation-PASCAL-MT-tmp
|
d3bc7164608a20eb6351c1d41219213927ae6239
|
[
"MIT"
] | null | null | null |
# This code is referenced from
# https://github.com/facebookresearch/astmt/
#
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# License: Attribution-NonCommercial 4.0 International
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
import numpy as np
class SoftMaxwithLoss(Module):
"""
This function returns cross entropy loss for semantic segmentation
"""
def __init__(self):
super(SoftMaxwithLoss, self).__init__()
self.softmax = nn.LogSoftmax(dim=1)
self.criterion = nn.NLLLoss(ignore_index=255)
def forward(self, out, label):
assert not label.requires_grad
# out shape batch_size x channels x h x w
# label shape batch_size x 1 x h x w
label = label[:, 0, :, :].long()
loss = self.criterion(self.softmax(out), label)
return loss
class BalancedCrossEntropyLoss(Module):
"""
Balanced Cross Entropy Loss with optional ignore regions
"""
def __init__(self, size_average=True, batch_average=True, pos_weight=None):
super(BalancedCrossEntropyLoss, self).__init__()
self.size_average = size_average
self.batch_average = batch_average
self.pos_weight = pos_weight
def forward(self, output, label, void_pixels=None):
assert (output.size() == label.size())
labels = torch.ge(label, 0.5).float()
# Weighting of the loss, default is HED-style
if self.pos_weight is None:
num_labels_pos = torch.sum(labels)
num_labels_neg = torch.sum(1.0 - labels)
num_total = num_labels_pos + num_labels_neg
w = num_labels_neg / num_total
else:
w = self.pos_weight
output_gt_zero = torch.ge(output, 0).float()
loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))
loss_pos_pix = -torch.mul(labels, loss_val)
loss_neg_pix = -torch.mul(1.0 - labels, loss_val)
if void_pixels is not None and not self.pos_weight:
w_void = torch.le(void_pixels, 0.5).float()
loss_pos_pix = torch.mul(w_void, loss_pos_pix)
loss_neg_pix = torch.mul(w_void, loss_neg_pix)
num_total = num_total - torch.ge(void_pixels, 0.5).float().sum()
w = num_labels_neg / num_total
loss_pos = torch.sum(loss_pos_pix)
loss_neg = torch.sum(loss_neg_pix)
final_loss = w * loss_pos + (1 - w) * loss_neg
if self.size_average:
final_loss /= float(np.prod(label.size()))
elif self.batch_average:
final_loss /= label.size()[0]
return final_loss
class BinaryCrossEntropyLoss(Module):
"""
Binary Cross Entropy with ignore regions, not balanced.
"""
def __init__(self, size_average=True, batch_average=True):
super(BinaryCrossEntropyLoss, self).__init__()
self.size_average = size_average
self.batch_average = batch_average
def forward(self, output, label, void_pixels=None):
assert (output.size() == label.size())
labels = torch.ge(label, 0.5).float()
output_gt_zero = torch.ge(output, 0).float()
loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))
loss_pos_pix = -torch.mul(labels, loss_val)
loss_neg_pix = -torch.mul(1.0 - labels, loss_val)
if void_pixels is not None:
w_void = torch.le(void_pixels, 0.5).float()
loss_pos_pix = torch.mul(w_void, loss_pos_pix)
loss_neg_pix = torch.mul(w_void, loss_neg_pix)
loss_pos = torch.sum(loss_pos_pix)
loss_neg = torch.sum(loss_neg_pix)
final_loss = loss_pos + loss_neg
if self.size_average:
final_loss /= float(np.prod(label.size()))
elif self.batch_average:
final_loss /= label.size()[0]
return final_loss
class DepthLoss(nn.Module):
"""
Loss for depth prediction. By default L1 loss is used.
"""
def __init__(self, loss='l1'):
super(DepthLoss, self).__init__()
if loss == 'l1':
self.loss = nn.L1Loss()
else:
raise NotImplementedError('Loss {} currently not supported in DepthLoss'.format(loss))
def forward(self, out, label):
mask = (label != 255)
return self.loss(torch.masked_select(out, mask), torch.masked_select(label, mask))
class Normalize(nn.Module):
def __init__(self):
super(Normalize, self).__init__()
def forward(self, bottom):
qn = torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) + 1e-12
top = bottom.div(qn)
return top
class NormalsLoss(Module):
"""
L1 loss with ignore labels
normalize: normalization for surface normals
"""
def __init__(self, size_average=True, normalize=False, norm=1):
super(NormalsLoss, self).__init__()
self.size_average = size_average
if normalize:
self.normalize = Normalize()
else:
self.normalize = None
if norm == 1:
print('Using L1 loss for surface normals')
self.loss_func = F.l1_loss
elif norm == 2:
print('Using L2 loss for surface normals')
self.loss_func = F.mse_loss
else:
raise NotImplementedError
def forward(self, out, label, ignore_label=255):
assert not label.requires_grad
mask = (label != ignore_label)
n_valid = torch.sum(mask).item()
if self.normalize is not None:
out_norm = self.normalize(out)
loss = self.loss_func(torch.masked_select(out_norm, mask), torch.masked_select(label, mask), reduction='sum')
else:
loss = self.loss_func(torch.masked_select(out, mask), torch.masked_select(label, mask), reduction='sum')
if self.size_average:
if ignore_label:
ret_loss = torch.div(loss, max(n_valid, 1e-6))
return ret_loss
else:
ret_loss = torch.div(loss, float(np.prod(label.size())))
return ret_loss
return loss
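# --- Editor's illustration (not part of the original astmt code) ----------------
# A minimal smoke test for BalancedCrossEntropyLoss. The shapes and random tensors
# below are assumptions chosen only to show the expected call signature: raw
# (pre-sigmoid) logits and a binary target of the same size.
if __name__ == '__main__':
    logits = torch.randn(2, 1, 8, 8)                    # network output, no sigmoid applied
    target = (torch.rand(2, 1, 8, 8) > 0.7).float()     # binary ground-truth mask
    criterion = BalancedCrossEntropyLoss(size_average=True)
    print('balanced BCE:', criterion(logits, target).item())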
| 32.080808
| 121
| 0.62012
| 842
| 6,352
| 4.442993
| 0.191211
| 0.035285
| 0.036087
| 0.030473
| 0.511895
| 0.461909
| 0.437316
| 0.428228
| 0.381716
| 0.359262
| 0
| 0.01257
| 0.273615
| 6,352
| 197
| 122
| 32.243655
| 0.798223
| 0.098709
| 0
| 0.48
| 0
| 0
| 0.021292
| 0
| 0
| 0
| 0
| 0
| 0.032
| 1
| 0.096
| false
| 0
| 0.04
| 0
| 0.248
| 0.016
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76edfc1706c920930c1bc7aab823c6e785689aff
| 1,406
|
py
|
Python
|
leetcode/0006_ZigZag_Conversion/zigzag_conversion.py
|
zyeak/leetcode
|
5d7bf16bd755224223c71e8e6df81c1ff49daadc
|
[
"MIT"
] | null | null | null |
leetcode/0006_ZigZag_Conversion/zigzag_conversion.py
|
zyeak/leetcode
|
5d7bf16bd755224223c71e8e6df81c1ff49daadc
|
[
"MIT"
] | null | null | null |
leetcode/0006_ZigZag_Conversion/zigzag_conversion.py
|
zyeak/leetcode
|
5d7bf16bd755224223c71e8e6df81c1ff49daadc
|
[
"MIT"
] | null | null | null |
# solution 1:
class Solution1:
def convert(self, s: str, numRows: int) -> str:
if numRows == 1 or numRows >= len(s):
return s
L = [''] * numRows
index, step = 0, 1
for x in s:
L[index] += x
if index == 0:
step = 1
elif index == numRows - 1:
step = -1
index += step
return ''.join(L)
# Solution 2
class Solution:
def convert(self, s: str, numRows: int) -> str:
# If we have only one row then we can return the string as it is
if numRows < 2:
return s
        # We create an empty string for each row and append each character to its
        # current row, walking from row 0 to row numRows-1. If we reach the bottom
        # (i.e. row == numRows-1) we move up; similarly, if we reach the top we
        # change direction and move down again. Finally, after filling all the rows,
        # we join them: row0 + row1 + ... + row(numRows-1).
row = 0
result = [""]*numRows
for character in s:
if row == 0:
move_down = True
elif row == numRows-1:
move_down = False
result[row] += character
row = (row+1) if move_down else row-1
return "".join(result)
if __name__ == '__main__':
# begin
s = Solution()
print(s.convert("PAYPALISHIRING", 3))
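    # Editor's addition (illustration only): with 3 rows, the canonical LeetCode
    # example zig-zags "PAYPALISHIRING" into the rows "PAHN", "APLSIIG" and "YIR",
    # which read row by row give "PAHNAPLSIIGYIR". Both solutions should agree.
    assert Solution().convert("PAYPALISHIRING", 3) == "PAHNAPLSIIGYIR"
    assert Solution1().convert("PAYPALISHIRING", 3) == "PAHNAPLSIIGYIR"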
| 31.244444
| 92
| 0.516358
| 191
| 1,406
| 3.743456
| 0.408377
| 0.055944
| 0.046154
| 0.041958
| 0.092308
| 0.092308
| 0.092308
| 0.092308
| 0.092308
| 0
| 0
| 0.025641
| 0.389758
| 1,406
| 45
| 93
| 31.244444
| 0.807692
| 0.289474
| 0
| 0.129032
| 0
| 0
| 0.0222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0
| 0
| 0.258065
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76ef2321c51f2dff2461f9538c87721e5bf560d2
| 2,013
|
py
|
Python
|
FakeNewsClassifierWithLSTM.py
|
pratikasarkar/nlp
|
275c80ab10f6dc4b4553bbcc5e5c8a4d00ff9fea
|
[
"Unlicense"
] | null | null | null |
FakeNewsClassifierWithLSTM.py
|
pratikasarkar/nlp
|
275c80ab10f6dc4b4553bbcc5e5c8a4d00ff9fea
|
[
"Unlicense"
] | null | null | null |
FakeNewsClassifierWithLSTM.py
|
pratikasarkar/nlp
|
275c80ab10f6dc4b4553bbcc5e5c8a4d00ff9fea
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 11 13:42:45 2021
@author: ASUS
"""
import pandas as pd
df = pd.read_csv(r'D:\nlp\fake-news-data\train.csv')
df = df.dropna()
X = df.drop('label',axis = 1)
y = df['label']
import tensorflow as tf
from tensorflow.keras.layers import Embedding, Dense, LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import one_hot
# Vocabulary size
voc_size = 5000
# One Hot Representation
messages = X.copy()
messages.reset_index(inplace = True)
import nltk
import re
from nltk.corpus import stopwords
# Dataset Preprocessing
from nltk.stem import PorterStemmer
ps = PorterStemmer()
corpus = []
for i in range(len(messages)):
print(i)
review = re.sub('[^a-zA-Z]',' ',messages['title'][i])
review = review.lower()
review = review.split()
review = [ps.stem(word) for word in review if word not in stopwords.words('english')]
review = " ".join(review)
corpus.append(review)
onehot_repr = [one_hot(words,voc_size) for words in corpus]
sent_len = 20
embedded_doc = pad_sequences(onehot_repr,maxlen = sent_len,padding = 'pre')
# Creating the model
embedding_vector_features = 40
model = Sequential()
model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len))
model.add(LSTM(100))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer = 'adam',metrics = ['accuracy'])
model.summary()
import numpy as np
X_final = np.array(embedded_doc)
y_final = np.array(y)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_final,y_final,test_size = 0.33,random_state = 42)
model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64)
y_pred = model.predict_classes(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test,y_pred)
acc = accuracy_score(y_test,y_pred)
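# Editor's note (hedged, depends on the installed TensorFlow version):
# `Sequential.predict_classes` used above was deprecated and then removed in newer
# TensorFlow/Keras releases (2.6+). On those versions an equivalent for this
# binary, sigmoid-output model is:
#     y_pred = (model.predict(X_test) > 0.5).astype("int32")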
| 27.958333
| 103
| 0.752608
| 313
| 2,013
| 4.667732
| 0.469649
| 0.03833
| 0.052019
| 0.043806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019864
| 0.12469
| 2,013
| 71
| 104
| 28.352113
| 0.809308
| 0.076006
| 0
| 0
| 0
| 0
| 0.056818
| 0.016775
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.276596
| 0
| 0.276596
| 0.021277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76f17efadc147bee33131952c1b99b7ec42d46c2
| 1,890
|
py
|
Python
|
tests/test_auto_scan_logsigmoid.py
|
yeliang2258/Paddle2ONNX
|
5eeef77f2f90d1e2a45dacf6eb1cc5f35f6224a4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/test_auto_scan_logsigmoid.py
|
yeliang2258/Paddle2ONNX
|
5eeef77f2f90d1e2a45dacf6eb1cc5f35f6224a4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/test_auto_scan_logsigmoid.py
|
yeliang2258/Paddle2ONNX
|
5eeef77f2f90d1e2a45dacf6eb1cc5f35f6224a4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-01-29T04:35:49.000Z
|
2022-01-29T04:35:49.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import OPConvertAutoScanTest, BaseNet
from hypothesis import reproduce_failure
import hypothesis.strategies as st
import numpy as np
import unittest
import paddle
class Net(BaseNet):
"""
simple Net
"""
def forward(self, inputs):
"""
forward
"""
x = paddle.nn.functional.log_sigmoid(inputs)
return x
class TestLogsigmoidConvert(OPConvertAutoScanTest):
"""
api: paddle.nn.functional.log_sigmoid
OPset version: 7, 9, 15
"""
def sample_convert_config(self, draw):
input_shape = draw(
st.lists(
st.integers(
min_value=20, max_value=100),
min_size=4,
max_size=4))
input_spec = [-1] * len(input_shape)
dtype = draw(st.sampled_from(["float32", "float64"]))
config = {
"op_names": ["logsigmoid"],
"test_data_shapes": [input_shape],
"test_data_types": [[dtype]],
"opset_version": [7, 9, 15],
"input_spec_shape": [input_spec],
}
models = Net(config)
return (config, models)
def test(self):
self.run_and_statis(max_examples=30)
if __name__ == "__main__":
unittest.main()
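# Editor's note (interpretation of the config above, not Paddle2ONNX docs):
# `sample_convert_config` draws a random 4-D input shape with every dimension in
# [20, 100] and a dtype of float32 or float64, then asks the harness to check the
# converted `logsigmoid` op against ONNX opsets 7, 9 and 15; the `input_spec` of
# [-1, -1, -1, -1] marks all dimensions as dynamic for the export.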
| 26.619718
| 74
| 0.635979
| 234
| 1,890
| 4.982906
| 0.589744
| 0.051458
| 0.022298
| 0.027444
| 0.075472
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021676
| 0.267725
| 1,890
| 70
| 75
| 27
| 0.820809
| 0.351323
| 0
| 0
| 0
| 0
| 0.086505
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.393939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76f217cfd33281d5ca8af791540db7576b28df64
| 4,408
|
py
|
Python
|
oasislmf/utils/concurrency.py
|
bbetov-corelogic/OasisLMF
|
fcb9a595ec6eb30c2ed3b9b67152c2f27fc0082b
|
[
"BSD-3-Clause"
] | null | null | null |
oasislmf/utils/concurrency.py
|
bbetov-corelogic/OasisLMF
|
fcb9a595ec6eb30c2ed3b9b67152c2f27fc0082b
|
[
"BSD-3-Clause"
] | null | null | null |
oasislmf/utils/concurrency.py
|
bbetov-corelogic/OasisLMF
|
fcb9a595ec6eb30c2ed3b9b67152c2f27fc0082b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
import sys
import types
import billiard
from signal import (
signal,
SIGINT,
)
from threading import (
Event,
Thread,
)
__all__ = [
'multiprocess',
'multithread',
'SignalHandler',
'Task'
]
class SignalHandler(object):
def __init__(self, stopper, threads):
self.stopper = stopper
self.threads = threads
def __call__(self, signum, frame):
self.stopper.set()
for task in self.threads:
task.join()
sys.exit(0)
class Task(object):
def __init__(self, func, args=(), key=None):
self._func = func
self._args = args
self._key = key if key is not None else func.__name__
self._result = None
self._is_done = False
@property
def func(self):
"""
Task function/method property - getter only.
:getter: Gets the task function/method object
"""
return self._func
@property
def args(self):
"""
Task function/method arguments property - getter only.
:getter: Gets the task function/method arguments
"""
return self._args
@property
def key(self):
"""
Task function/method key - getter only.
:getter: Gets the task function/method key
"""
return self._key
@property
def result(self):
"""
Task function/method result property.
:getter: Gets the task function/method result (produced by calling
the function on the defined arguments)
:setter: Sets the task function/method result
"""
return self._result
@result.setter
def result(self, r):
self._result = r
self._is_done = True
@property
def is_done(self):
"""
Task function/method status property - getter only.
:getter: Gets the task function/method status
"""
return self._is_done
def multithread(tasks, pool_size=10):
"""
Executes several tasks concurrently via ``threading`` threads, puts the
    results into a queue, and yields them back to the caller.
"""
task_q = Queue()
num_tasks = 0
for task in tasks:
task_q.put(task)
num_tasks += 1
def run(i, task_q, result_q, stopper):
while not stopper.is_set():
try:
task = task_q.get_nowait()
except Empty:
break
else:
task.result = task.func(*task.args) if task.args else task.func()
if type(task.result) in (types.GeneratorType, list, tuple, set):
for r in task.result:
result_q.put((task.key, r,))
else:
result_q.put((task.key, task.result,))
task_q.task_done()
result_q = Queue()
stopper = Event()
threads = tuple(Thread(target=run, args=(i, task_q, result_q, stopper,)) for i in range(pool_size))
handler = SignalHandler(stopper, threads)
signal(SIGINT, handler)
for thread in threads:
thread.start()
task_q.join()
while not result_q.empty():
key, result = result_q.get_nowait()
yield key, result
def multiprocess(tasks, pool_size=10):
"""
Executes several tasks concurrently via Python ``multiprocessing``
    processes, puts the results into a queue, and yields them back to the
    caller.
"""
pool = billiard.Pool(pool_size)
result_q = Queue()
def build_results(result):
if type(result) in (types.GeneratorType, list, tuple, set):
for r in result:
result_q.put(r)
else:
result_q.put(result)
for task in tasks:
run = pool.apply_async(task.func, args=task.args, callback=build_results)
run.get()
pool.close()
pool.join()
while not result_q.empty():
result = result_q.get_nowait()
yield result
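# --- Editor's illustration (not part of the module) -----------------------------
# A minimal usage sketch for `multithread` with `Task`; the `square` function and
# the pool size are made up for the example.
if __name__ == '__main__':
    def square(x):
        return x * x

    example_tasks = [Task(square, args=(i,), key='square-%d' % i) for i in range(5)]
    results = dict(multithread(example_tasks, pool_size=2))
    print(results)   # e.g. {'square-0': 0, 'square-1': 1, 'square-2': 4, ...}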
| 23.078534
| 103
| 0.595735
| 530
| 4,408
| 4.783019
| 0.241509
| 0.033136
| 0.078107
| 0.049704
| 0.314793
| 0.26075
| 0.192505
| 0.192505
| 0.176331
| 0.078895
| 0
| 0.002646
| 0.313975
| 4,408
| 190
| 104
| 23.2
| 0.835648
| 0.207123
| 0
| 0.142857
| 0
| 0
| 0.012191
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116071
| false
| 0
| 0.133929
| 0
| 0.3125
| 0.008929
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76f317598810c56fd2ed005b83b2ae2293df83ae
| 24,928
|
py
|
Python
|
vixen/project.py
|
amoeba/vixen
|
a2b450fa918e23da644b1818807577139a0ae6e8
|
[
"BSD-3-Clause"
] | 10
|
2017-09-19T11:00:11.000Z
|
2021-08-12T08:56:15.000Z
|
vixen/project.py
|
amoeba/vixen
|
a2b450fa918e23da644b1818807577139a0ae6e8
|
[
"BSD-3-Clause"
] | 22
|
2018-01-14T11:22:14.000Z
|
2020-04-08T00:01:29.000Z
|
vixen/project.py
|
amoeba/vixen
|
a2b450fa918e23da644b1818807577139a0ae6e8
|
[
"BSD-3-Clause"
] | 3
|
2018-01-24T16:55:01.000Z
|
2019-06-17T04:26:33.000Z
|
import datetime
import io
import json_tricks
import logging
import os
from os.path import (abspath, basename, dirname, exists, expanduser,
join, realpath, relpath, splitext)
import re
import shutil
import sys
from traits.api import (Any, Dict, Enum, HasTraits, Instance, List, Long,
Str)
from whoosh import fields, qparser, query
from whoosh.util.times import datetime_to_long, long_to_datetime
from .common import get_project_dir
from .media import Media, MediaData, get_media_data
from .directory import Directory
from . import processor
logger = logging.getLogger(__name__)
if sys.version_info[0] > 2:
unicode = str
string_types = (str,)
import csv
else:
string_types = (basestring,)
import backports.csv as csv
INT = fields.NUMERIC(numtype=int)
FLOAT = fields.NUMERIC(numtype=float)
def get_file_saved_time(path):
dt = datetime.datetime.fromtimestamp(os.stat(path).st_ctime)
return dt.ctime()
def _get_sample(fname):
sample = ''
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
sample += fp.readline() + fp.readline()
return sample
def _get_csv_headers(fname):
sample = _get_sample(fname)
sniffer = csv.Sniffer()
has_header = sniffer.has_header(sample)
dialect = sniffer.sniff(sample)
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
reader = csv.reader(fp, dialect)
header = next(reader)
return has_header, header, dialect
class TagInfo(HasTraits):
name = Str
type = Enum("string", "text", "int", "float", "bool")
default = Any
def __repr__(self):
return 'TagInfo(%r, %r)' % (self.name, self.type)
def _default_default(self):
map = {"string": "", "text": "", "int": 0, "float": 0.0,
"bool": False}
return map[self.type]
def open_file(fname_or_file, mode='rb'):
if hasattr(fname_or_file, 'read'):
return fname_or_file
else:
return open(fname_or_file, mode)
def sanitize_name(name):
name = name.lower()
name = re.sub(r'\s+', '_', name)
return re.sub(r'\W+', '', name)
def get_non_existing_filename(fname):
if exists(fname):
base, ext = splitext(basename(fname))
return join(dirname(fname), base + '_a' + ext)
else:
return fname
COMMON_TAGS = dict(
file_name='string', path='string', relpath='string',
ctime='string', mtime='string', size='int', type='string'
)
def _cleanup_query(q, tag_types):
type_map = dict(float=FLOAT.from_bytes, int=INT.from_bytes)
for term in q.leaves():
if isinstance(term, query.Term):
if isinstance(term.text, (str, unicode, bytes)):
fieldtype = tag_types[term.fieldname]
if fieldtype in type_map:
term.text = type_map[fieldtype](term.text)
else:
term.text = term.text.lower()
elif isinstance(term, query.Phrase):
term.words = [x.lower() for x in term.words]
def _check_value(value, expr):
if isinstance(expr, string_types):
return expr in value.lower()
else:
return expr == value
def _check_range(x, term):
result = True
if term.start is not None:
if term.startexcl:
result &= x > term.start
else:
result &= x >= term.start
if term.end is not None and result:
if term.endexcl:
result &= x < term.end
else:
result &= x <= term.end
return result
def _check_date_range(x, term):
result = True
if term.startdate is not None:
result &= x >= term.start
if term.enddate is not None and result:
result &= x <= term.end
return result
def _search_media(expr, m_key, get_tag):
"""Given search expression, index to media, and a getter to get the attribute
check if the media matches expression.
"""
if expr.is_leaf():
if isinstance(expr, query.Term):
attr = expr.fieldname
return _check_value(get_tag(m_key, attr), expr.text)
elif isinstance(expr, query.Phrase):
attr = expr.fieldname
text = " ".join(expr.words)
return _check_value(get_tag(m_key, attr), text)
elif isinstance(expr, query.DateRange):
if expr.fieldname == 'ctime':
value = get_tag(m_key, 'ctime_')
elif expr.fieldname == 'mtime':
value = get_tag(m_key, 'mtime_')
return _check_date_range(value, expr)
elif isinstance(expr, query.NumericRange):
attr = expr.fieldname
return _check_range(get_tag(m_key, attr), expr)
else:
print("Unsupported term: %r" % expr)
return False
else:
if isinstance(expr, query.And):
result = True
for child in expr.children():
result &= _search_media(child, m_key, get_tag)
if not result:
break
return result
elif isinstance(expr, query.Or):
result = False
for child in expr.children():
result |= _search_media(child, m_key, get_tag)
if result:
break
return result
elif isinstance(expr, query.Not):
subquery = list(expr.children())[0]
return not _search_media(subquery, m_key, get_tag)
else:
print("Unsupported term: %r" % expr)
return False
class Project(HasTraits):
name = Str
description = Str
path = Str
root = Instance(Directory)
tags = List(TagInfo)
_media = Dict(Str, Media)
extensions = List(Str)
processors = List(processor.FactoryBase)
number_of_files = Long
# Path where the project data is saved.
save_file = Str
last_save_time = Str
_data = Dict
_tag_data = Dict
_relpath2index = Dict()
_query_parser = Instance(qparser.QueryParser)
def add_tags(self, tags):
tags = list(self.tags) + tags
self.update_tags(tags)
def update_tags(self, new_tags):
old_tags = self.tags
new_tag_names = set(tag.name for tag in new_tags)
tag_info = dict((tag.name, tag.type) for tag in old_tags)
removed = []
added = []
for tag in new_tags:
if tag.name not in tag_info:
added.append(tag)
elif tag_info[tag.name] != tag.type:
removed.append(tag)
added.append(tag)
for tag in old_tags:
if tag.name not in new_tag_names:
removed.append(tag)
for tag in removed:
del self._tag_data[tag.name]
n_entries = len(self._relpath2index)
for tag in added:
self._tag_data[tag.name] = [tag.default]*n_entries
        # The accesses above can be the first time self._tag_data is touched (for
        # example when creating a new project), in which case __tag_data_default is
        # invoked using self.tags. If self.tags had already been replaced at that
        # point, the removed tags would be missing from _tag_data and deleting them
        # would fail. That is why self.tags is only assigned below, after _tag_data
        # has been updated.
self.tags = new_tags
# Update the cached media
for m in self._media.values():
for tag in removed:
del m.tags[tag.name]
for tag in added:
m.tags[tag.name] = tag.default
self._query_parser = self._make_query_parser()
def copy(self):
"""Make a copy of this project. This does not copy the data but only
the tags, extensions and the other settings of the project.
This will not copy any of the processor states but only their settings.
"""
name = self.name + ' copy'
p = Project(name=name)
traits = ['description', 'extensions', 'path', 'processors', 'tags']
p.copy_traits(self, traits, copy='deep')
# Clear out the _done information from the processors
for proc in p.processors:
proc._done.clear()
return p
# #### CRUD interface to the data ####
def update(self, media_data, tags=None):
"""Create/update the internal data given the media data and tags.
Parameters
----------
f: vixen.directory.File instance
tags: dict
"""
relpath = media_data.relpath
if not self.has_media(relpath):
index = len(self._relpath2index)
self._relpath2index[relpath] = index
for key in MediaData._fields:
self._data[key].append(None)
for tag in self.tags:
self._tag_data[tag.name].append(tag.default)
index = self._relpath2index[relpath]
for i, key in enumerate(MediaData._fields):
self._data[key][index] = media_data[i]
if tags:
for key, value in tags.items():
self._tag_data[key][index] = value
media = self._media.get(relpath)
if media is not None:
media.update(media_data, tags)
def get(self, relpath):
"""Given the relative path of some media, return a Media instance.
"""
if relpath in self._media:
return self._media[relpath]
else:
data = {}
index = self._relpath2index[relpath]
for key in MediaData._fields:
data[key] = self._data[key][index]
tags = {}
for key in self._tag_data:
tags[key] = self._tag_data[key][index]
media = Media.from_data(MediaData(**data), tags)
media.on_trait_change(self._media_tag_handler, 'tags_items')
self._media[relpath] = media
return media
def remove(self, relpaths):
"""Given a list of relative path of some media, remove them from the
database.
"""
relpath2index = self._relpath2index
indices = [(x, relpath2index[x]) for x in relpaths]
for relpath, index in sorted(indices, reverse=True):
last = len(relpath2index) - 1
if index == last:
self._delete_record(last, relpath)
else:
self._replace_with_last_record(index, last)
self._delete_record(last, relpath)
def has_media(self, relpath):
"""Returns True if the media data is available.
"""
return relpath in self._relpath2index
def keys(self):
"""Return all the keys for the media relative paths."""
return self._relpath2index.keys()
def _get_media_attr(self, index, attr):
"""Given an index to the media, return its value.
"""
if attr in self._data:
return self._data[attr][index]
elif attr in self._tag_data:
return self._tag_data[attr][index]
# #### End of CRUD interface to the data ####
def clean(self):
"""Scan the project and remove any dead entries.
This is useful when you remove or rename files. This does not refresh
the directory tree or set the number of files. It simply cleans up the
db of files that no longer exist.
"""
logger.info('Cleaning project: %s', self.name)
root_path = self.path
to_remove = []
relpath2index = self._relpath2index
for rpath in list(relpath2index.keys()):
fname = os.path.join(root_path, rpath)
if not os.path.exists(fname):
to_remove.append(rpath)
self.remove(to_remove)
def export_csv(self, fname, cols=None):
"""Export metadata to a csv file. If `cols` are not specified,
it writes out all the useful metadata.
Parameters
-----------
fname: str: a path to the csv file to dump.
cols: sequence: a sequence of columns to write.
"""
logger.info('Exporting CSV: %s', fname)
all_keys = ((set(MediaData._fields) | set(self._tag_data.keys()))
- set(('ctime_', 'mtime_')))
if cols is None:
cols = all_keys
cols = list(sorted(cols))
data_cols = set([x for x in cols if x in self._data])
with io.open(fname, 'w', newline='', encoding='utf-8') as of:
# Write the header.
writer = csv.writer(of)
writer.writerow(cols)
for i in range(len(self._relpath2index)):
line = []
for col in cols:
if col in data_cols:
elem = self._data[col][i]
else:
elem = self._tag_data[col][i]
line.append(elem)
writer.writerow(line)
def import_csv(self, fname):
"""Read tag information from given CSV filename.
Returns the success status and the error message if any. Note that this
only applies tags for column headers with known tags. Unknown tags are
not added.
Parameters
----------
        fname : str
            Input filename.
"""
logger.info('Importing tags from: %s', fname)
has_header, header, dialect = _get_csv_headers(fname)
if not has_header:
return False, "The CSV file does not appear to have a header."
if 'path' not in header:
msg = "The CSV file does not have a 'path' column."
return False, msg
tags = {x: header.index(x.name) for x in self.tags if x.name in header}
path_idx = header.index('path')
TRUE = ('1', 't', 'true', 'y', 'yes')
type_map = {
'bool': lambda x: x.lower() in TRUE,
'string': lambda x: x,
'text': lambda x: x,
'int': int,
'float': float
}
count = 0
total = 0
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
reader = csv.reader(fp, dialect)
next(reader) # Skip header
for record in reader:
total += 1
path = record[path_idx]
rpath = relpath(path, self.path)
index = self._relpath2index.get(rpath, None)
media = self._media.get(rpath)
if index is not None:
count += 1
for tag, header_index in tags.items():
data = record[header_index]
try:
value = type_map[tag.type](data)
if media is not None:
media.tags[tag.name] = value
else:
self._tag_data[tag.name][index] = value
except ValueError:
pass
msg = "Read tags for %d paths out of %d entries." % (count, total)
if count == 0 and total > 0:
msg += ("\nPlease check that your path column matches "
"the media paths.")
return False, msg
else:
msg += ("\nPlease check the imported tags and make sure you "
"save the project.")
return True, msg
def load(self, fp=None):
"""Load media info from opened file object.
"""
if fp is None:
if not exists(self.save_file):
return
fp = open_file(self.save_file, 'rb')
else:
fp = open_file(fp, 'rb')
data = json_tricks.load(
fp, preserve_order=False, ignore_comments=False
)
fp.close()
self.name = data.get('name', '')
self.description = data.get('description', '')
self.path = data.get('path')
self.tags = [TagInfo(name=x[0], type=x[1]) for x in data['tags']]
self.processors = [processor.load(x)
for x in data.get('processors', [])]
version = data.get('version')
if version == 1:
self._read_version1_media(data['media'])
else:
self._data = data['media_data']
self._tag_data = data['tag_data']
self._relpath2index = data['relpath2index']
root = Directory()
root.__setstate__(data.get('root'))
self.extensions = root.extensions
self.root = root
self.number_of_files = len(self._relpath2index)
def save(self):
"""Save current media info to a file object
"""
if len(self.save_file) > 0:
self.save_as(self.save_file)
self._update_last_save_time()
else:
raise IOError("No valid save file set.")
def save_as(self, fp):
"""Save copy to specified path.
"""
fp = open_file(fp, 'wb')
tags = [(t.name, t.type) for t in self.tags]
root = self.root.__getstate__()
processors = [processor.dump(x) for x in self.processors]
data = dict(
version=2, path=self.path, name=self.name,
description=self.description, tags=tags,
media_data=self._data, tag_data=self._tag_data,
relpath2index=self._relpath2index,
root=root, processors=processors
)
json_tricks.dump(data, fp, compression=True)
fp.close()
logger.info('Saved project: %s', self.name)
def scan(self, refresh=False):
"""Find all the media recursively inside the root directory.
This will not clobber existing records but will add any new ones.
"""
self._setup_root()
def _scan(dir):
for f in dir.files:
if not self.has_media(f.relpath) or refresh:
data = get_media_data(f.path, f.relpath)
self.update(data)
for d in dir.directories:
if refresh:
d.refresh()
_scan(d)
if refresh:
self.root.refresh()
_scan(self.root)
self.number_of_files = len(self._relpath2index)
def search(self, q):
"""A generator which yields the (filename, relpath) for each file
satisfying the search query.
"""
logger.info('Searching for %s', q)
try:
parsed_q = self._query_parser.parse(q)
except Exception:
logger.warn("Invalid search expression: %s", q)
print("Invalid search expression: %s" % q)
return
tag_types = self._get_tag_types()
_cleanup_query(parsed_q, tag_types)
for key, index in self._relpath2index.items():
if _search_media(parsed_q, index, self._get_media_attr):
yield basename(key), key
def refresh(self):
logger.info('Refreshing project: %s', self.name)
self.clean()
self.scan(refresh=True)
# #### Private protocol ################################################
def _setup_root(self):
path = abspath(expanduser(self.path))
root = self.root
if root is None or realpath(root.path) != realpath(path):
self.root = Directory(path=path, extensions=self.extensions)
def _tags_default(self):
return [TagInfo(name='completed', type='bool')]
def _save_file_default(self):
if len(self.name) > 0:
fname = sanitize_name(self.name) + '.vxn'
d = get_project_dir()
return get_non_existing_filename(join(d, fname))
else:
return ''
def _update_last_save_time(self):
self.last_save_time = get_file_saved_time(self.save_file)
def _last_save_time_default(self):
if exists(self.save_file):
return get_file_saved_time(self.save_file)
else:
return ''
def _name_changed(self, name):
if len(name) > 0:
old_save_file = self.save_file
old_dir = dirname(old_save_file)
new_save_file = join(old_dir, sanitize_name(name) + '.vxn')
if new_save_file != old_save_file:
self.save_file = new_save_file
if exists(old_save_file):
shutil.move(old_save_file, self.save_file)
def _extensions_changed(self, ext):
if self.root is not None:
self.root.extensions = ext
def _extensions_items_changed(self):
if self.root is not None:
self.root.extensions = self.extensions
def _get_tag_types(self):
result = dict(COMMON_TAGS)
result.update(dict((t.name, t.type) for t in self.tags))
return result
def _make_schema(self):
from whoosh.fields import BOOLEAN, DATETIME, TEXT, Schema
kw = dict(
type=TEXT, file_name=TEXT, path=TEXT,
mtime=DATETIME, ctime=DATETIME, size=INT
)
type_to_field = dict(
string=TEXT, text=TEXT, int=INT, float=FLOAT, bool=BOOLEAN
)
for tag in self.tags:
kw[tag.name] = type_to_field[tag.type]
return Schema(**kw)
def _make_query_parser(self):
schema = self._make_schema()
qp = qparser.QueryParser('path', schema=schema)
qp.add_plugin(qparser.GtLtPlugin())
from whoosh.qparser.dateparse import DateParserPlugin
qp.add_plugin(DateParserPlugin())
return qp
def __query_parser_default(self):
return self._make_query_parser()
def __data_default(self):
data = {}
for key in MediaData._fields:
data[key] = []
return data
def __tag_data_default(self):
tags = {}
for key in self.tags:
tags[key.name] = []
return tags
def _media_tag_handler(self, obj, tname, old, new):
index = self._relpath2index[obj.relpath]
for tag in new.changed:
self._tag_data[tag][index] = obj.tags[tag]
def _read_version1_media(self, media):
data = self.__data_default()
tag_data = self.__tag_data_default()
relpath2index = {}
keymap = dict.fromkeys(MediaData._fields)
for k in keymap:
keymap[k] = k
keymap['_ctime'] = 'ctime_'
keymap['_mtime'] = 'mtime_'
for index, (key, m) in enumerate(media):
relpath2index[key] = index
tags = m.pop('tags')
for tname, v in tags.items():
tag_data[tname].append(v)
for k, v in m.items():
data[keymap[k]].append(v)
if 'file_name' not in m:
data['file_name'].append(basename(key))
data['mtime_'] = [datetime_to_long(x) for x in data['mtime_']]
data['ctime_'] = [datetime_to_long(x) for x in data['ctime_']]
self._data = data
self._tag_data = tag_data
self._relpath2index = relpath2index
def _delete_record(self, index, relpath):
for key in MediaData._fields:
del self._data[key][index]
for key in self._tag_data:
del self._tag_data[key][index]
if relpath in self._media:
del self._media[relpath]
del self._relpath2index[relpath]
def _replace_with_last_record(self, index, last):
_data = self._data
_tag_data = self._tag_data
for key in MediaData._fields:
_data[key][index] = _data[key][last]
for key in self._tag_data:
_tag_data[key][index] = _tag_data[key][last]
last_relpath = _data['relpath'][last]
self._relpath2index[last_relpath] = index
def _save_as_v1(self, fp):
"""Save copy to specified path.
This mainly exists for testing and making sure we still read the old
saved files.
"""
def _rewrite_dir(state):
"Rewrite directories in the old format."
state['files'] = [x[0] for x in state['files']]
state['directories'] = [_rewrite_dir(d)
for d in state['directories']]
state.pop('relpath')
state.pop('name')
return state
fp = open_file(fp, 'wb')
media = [(key, self.get(key).to_dict()) for key in self._relpath2index]
tags = [(t.name, t.type) for t in self.tags]
root = _rewrite_dir(self.root.__getstate__())
processors = [processor.dump(x) for x in self.processors]
for k, m in media:
m['_ctime'] = long_to_datetime(m['_ctime'])
m['_mtime'] = long_to_datetime(m['_mtime'])
data = dict(
version=1, path=self.path, name=self.name,
description=self.description, tags=tags, media=media,
root=root, processors=processors
)
json_tricks.dump(data, fp, compression=True)
fp.close()
logger.info('Saved project: %s', self.name)
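# --- Editor's illustration (not part of vixen) -----------------------------------
# Rough usage sketch of the Project API defined above; the project name, media
# path and query below are invented for the example.
if __name__ == '__main__':
    p = Project(name='holiday-photos', path='~/Pictures/holiday')
    p.scan()                                  # index all media under the path
    print(p.number_of_files, 'files indexed')
    for fname, rpath in p.search('file_name:beach'):
        print(fname, rpath)
    p.save()                                  # writes to the default save_file under the project dir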
| 33.193076
| 81
| 0.565709
| 3,098
| 24,928
| 4.38541
| 0.131375
| 0.017003
| 0.017812
| 0.007213
| 0.230973
| 0.152804
| 0.126527
| 0.102532
| 0.072648
| 0.058958
| 0
| 0.003705
| 0.328747
| 24,928
| 750
| 82
| 33.237333
| 0.808223
| 0.098644
| 0
| 0.201426
| 0
| 0
| 0.048846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092692
| false
| 0.001783
| 0.040998
| 0.005348
| 0.254902
| 0.005348
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76f36db1130141ba9e8823d77aa6984660a91f95
| 5,659
|
py
|
Python
|
prance/util/translator.py
|
elemental-lf/prance
|
d4bb6d2edf00ef18540a140025df8ad75d01fc16
|
[
"MIT"
] | null | null | null |
prance/util/translator.py
|
elemental-lf/prance
|
d4bb6d2edf00ef18540a140025df8ad75d01fc16
|
[
"MIT"
] | null | null | null |
prance/util/translator.py
|
elemental-lf/prance
|
d4bb6d2edf00ef18540a140025df8ad75d01fc16
|
[
"MIT"
] | null | null | null |
"""This submodule contains a JSON reference translator."""
__author__ = 'Štěpán Tomsa'
__copyright__ = 'Copyright © 2021 Štěpán Tomsa'
__license__ = 'MIT'
__all__ = ()
import prance.util.url as _url
def _reference_key(ref_url, item_path):
"""
    Build a key for a dereferenced object from its URL and object path.
    Format: <last path segment of ref_url>_<object path joined with '_'>.
"""
return ref_url.path.split('/')[-1] + '_' + '_'.join(item_path[1:])
def _local_ref(path):
url = '#/' + '/'.join(path)
return {'$ref': url}
# Underscored to allow some time for the public API to be stabilized.
class _RefTranslator:
"""
Resolve JSON pointers/references in a spec by translation.
References to objects in other files are copied to the /components/schemas
    object of the root document, while being translated to point to the new
object locations.
"""
def __init__(self, specs, url):
"""
Construct a JSON reference translator.
The translated specs are in the `specs` member after a call to
`translate_references` has been made.
If a URL is given, it is used as a base for calculating the absolute
URL of relative file references.
:param dict specs: The parsed specs in which to translate any references.
:param str url: [optional] The URL to base relative references on.
"""
import copy
self.specs = copy.deepcopy(specs)
self.__strict = True
self.__reference_cache = {}
self.__collected_references = {}
if url:
self.url = _url.absurl(url)
url_key = (_url.urlresource(self.url), self.__strict)
# If we have a url, we want to add ourselves to the reference cache
# - that creates a reference loop, but prevents child resolvers from
# creating a new resolver for this url.
self.__reference_cache[url_key] = self.specs
else:
self.url = None
def translate_references(self):
"""
Iterate over the specification document, performing the translation.
Traverses over the whole document, adding the referenced object from
external files to the /components/schemas object in the root document
and translating the references to the new location.
"""
self.specs = self._translate_partial(self.url, self.specs)
# Add collected references to the root document.
if self.__collected_references:
if 'components' not in self.specs:
self.specs['components'] = {}
if 'schemas' not in self.specs['components']:
self.specs['components'].update({'schemas': {}})
self.specs['components']['schemas'].update(self.__collected_references)
def _dereference(self, ref_url, obj_path):
"""
Dereference the URL and object path.
Returns the dereferenced object.
:param mixed ref_url: The URL at which the reference is located.
:param list obj_path: The object path within the URL resource.
:return: A copy of the dereferenced value, with all internal references
resolved.
"""
# In order to start dereferencing anything in the referenced URL, we have
# to read and parse it, of course.
contents = _url.fetch_url(ref_url, self.__reference_cache, strict=self.__strict)
# In this inner parser's specification, we can now look for the referenced
# object.
value = contents
if len(obj_path) != 0:
from prance.util.path import path_get
try:
value = path_get(value, obj_path)
except (KeyError, IndexError, TypeError) as ex:
raise _url.ResolutionError('Cannot resolve reference "%s": %s'
% (ref_url.geturl(), str(ex)))
# Deep copy value; we don't want to create recursive structures
import copy
value = copy.deepcopy(value)
# Now resolve partial specs
value = self._translate_partial(ref_url, value)
# That's it!
return value
def _translate_partial(self, base_url, partial):
changes = dict(tuple(self._translating_iterator(base_url, partial, ())))
paths = sorted(changes.keys(), key = len)
from prance.util.path import path_set
for path in paths:
value = changes[path]
if len(path) == 0:
partial = value
else:
path_set(partial, list(path), value, create = True)
return partial
def _translating_iterator(self, base_url, partial, path):
from prance.util.iterators import reference_iterator
for _, ref_string, item_path in reference_iterator(partial):
ref_url, obj_path = _url.split_url_reference(base_url, ref_string)
full_path = path + item_path
if ref_url.path == self.url.path:
# Reference to the root document.
ref_path = obj_path
else:
# Reference to a non-root document.
ref_key = _reference_key(ref_url, obj_path)
if ref_key not in self.__collected_references:
self.__collected_references[ref_key] = None
ref_value = self._dereference(ref_url, obj_path)
self.__collected_references[ref_key] = ref_value
ref_path = ['components', 'schemas', ref_key]
ref_obj = _local_ref(ref_path)
yield full_path, ref_obj
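# Editor's illustration of the helpers above (the file and object names are
# assumptions, not taken from prance's docs): a reference to "definitions/Pet"
# living in ".../schemas/common.yaml" gets keyed and re-pointed roughly like this:
#
#     _reference_key(<parsed url for .../schemas/common.yaml>, ['definitions', 'Pet'])
#         -> 'common.yaml_Pet'
#     _local_ref(['components', 'schemas', 'common.yaml_Pet'])
#         -> {'$ref': '#/components/schemas/common.yaml_Pet'}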
| 36.044586
| 88
| 0.621841
| 698
| 5,659
| 4.842407
| 0.290831
| 0.023077
| 0.040828
| 0.019231
| 0.050296
| 0.016568
| 0
| 0
| 0
| 0
| 0
| 0.002015
| 0.298463
| 5,659
| 156
| 89
| 36.275641
| 0.849118
| 0.351652
| 0
| 0.069444
| 0
| 0
| 0.051245
| 0
| 0.013889
| 0
| 0
| 0
| 0
| 1
| 0.097222
| false
| 0
| 0.083333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76f498ab6421077add9a6f59a90898f50d7b050c
| 3,501
|
py
|
Python
|
tests/test_bugs.py
|
mmibrah2/OpenQL
|
8fd4ccb0fa342f777b827235748fa5f6592b0c25
|
[
"Apache-2.0"
] | null | null | null |
tests/test_bugs.py
|
mmibrah2/OpenQL
|
8fd4ccb0fa342f777b827235748fa5f6592b0c25
|
[
"Apache-2.0"
] | null | null | null |
tests/test_bugs.py
|
mmibrah2/OpenQL
|
8fd4ccb0fa342f777b827235748fa5f6592b0c25
|
[
"Apache-2.0"
] | null | null | null |
import os
import filecmp
import unittest
import numpy as np
from openql import openql as ql
from utils import file_compare
curdir = os.path.dirname(os.path.realpath(__file__))
output_dir = os.path.join(curdir, 'test_output')
class Test_bugs(unittest.TestCase):
@classmethod
def setUp(self):
ql.initialize()
ql.set_option('output_dir', output_dir)
ql.set_option('use_default_gates', 'yes')
ql.set_option('log_level', 'LOG_WARNING')
# @unittest.expectedFailure
# @unittest.skip
def test_typecast(self):
sweep_points = [1,2]
num_circuits = 1
num_qubits = 2
platf = ql.Platform("starmon", 'cc_light')
p = ql.Program('test_bug', platf, num_qubits)
p.set_sweep_points(sweep_points)
k = ql.Kernel('kernel1', platf, num_qubits)
qubit = 1
k.identity(np.int(qubit))
k.identity(np.int32(qubit))
k.identity(np.int64(qubit))
k.identity(np.uint(qubit))
k.identity(np.uint32(qubit))
k.identity(np.uint64(qubit))
# add the kernel to the program
p.add_kernel(k)
# relates to https://github.com/QE-Lab/OpenQL/issues/171
    # Various runs of compile were generating different results or, in the best
    # case, strange errors, so multiple (NCOMPILES) runs of compile are executed
    # to make sure there is no error and that the output generated by all of the
    # runs is the same.
# JvS: more likely, it also had to do with the classical register allocator
# depending on stuff like Python's garbage collection to free a register.
# The register numbers have to be hardcoded now for that reason.
def test_stateful_behavior(self):
ql.set_option('optimize', 'no')
ql.set_option('scheduler', 'ALAP')
platform = ql.Platform("myPlatform", 'cc_light')
sweep_points = [1]
nqubits = 3
nregs = 3
p = ql.Program("statelessProgram", platform, nqubits, nregs)
p.set_sweep_points(sweep_points)
k = ql.Kernel("aKernel", platform, nqubits, nregs)
k.prepz(0)
k.gate('rx180', [0])
k.measure(0)
rd = ql.CReg(0)
rs1 = ql.CReg(1)
rs2 = ql.CReg(2)
k.classical(rs1, ql.Operation(3))
k.classical(rs1, ql.Operation(4))
k.classical(rd, ql.Operation(rs1, '+', rs2))
p.add_kernel(k)
NCOMPILES=50
QISA_fn = os.path.join(output_dir, p.name+'_last.qasm')
for i in range(NCOMPILES):
p.compile()
self.setUpClass()
QISA_fn_i = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm')
os.rename(QISA_fn,QISA_fn_i)
for i in range(NCOMPILES-1):
QISA_fn_1 = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm')
QISA_fn_2 = os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm')
self.assertTrue( file_compare(QISA_fn_1, QISA_fn_2))
    # It is unclear how this test works; it currently fails and it is not clear
    # how to repair it, so it is disabled. Re-enable it once this is understood.
# def test_empty_infinite_loop(self):
# name = 'empty_infinite_loop'
# in_fn = 'test_' + name + '.cq'
# out_fn = 'test_output/' + name + '_out.cq'
# gold_fn = 'golden/' + name + '_out.cq'
# ql.initialize()
# #ql.set_option('log_level', 'LOG_DEBUG')
# ql.compile(in_fn)
# self.assertTrue(file_compare(out_fn, gold_fn))
if __name__ == '__main__':
unittest.main()
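# Editor's note (hedged): the `np.int` alias exercised by test_typecast was
# deprecated in NumPy 1.20 and removed in 1.24, so on current NumPy that single
# call would need the builtin `int` (or a sized type such as np.int32 / np.int64,
# which the test already covers).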
| 32.719626
| 83
| 0.622394
| 498
| 3,501
| 4.190763
| 0.389558
| 0.023
| 0.031624
| 0.038333
| 0.172976
| 0.114039
| 0.092956
| 0.081457
| 0.081457
| 0.034499
| 0
| 0.017228
| 0.253927
| 3,501
| 106
| 84
| 33.028302
| 0.781776
| 0.295344
| 0
| 0.0625
| 0
| 0
| 0.086779
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 1
| 0.046875
| false
| 0
| 0.09375
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76f7444b8365ea513820f85545f6a315ea621999
| 6,577
|
py
|
Python
|
python/ray/tune/tests/test_tune_save_restore.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 22
|
2018-05-08T05:52:34.000Z
|
2020-04-01T10:09:55.000Z
|
python/ray/tune/tests/test_tune_save_restore.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 73
|
2021-09-25T07:11:39.000Z
|
2022-03-26T07:10:59.000Z
|
python/ray/tune/tests/test_tune_save_restore.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 10
|
2018-04-27T10:50:59.000Z
|
2020-02-24T02:41:43.000Z
|
# coding: utf-8
import os
import pickle
import shutil
import tempfile
import unittest
import ray
from ray import tune
from ray.rllib import _register_all
from ray.tune import Trainable
from ray.tune.utils import validate_save_restore
class SerialTuneRelativeLocalDirTest(unittest.TestCase):
local_mode = True
prefix = "Serial"
class MockTrainable(Trainable):
_name = "MockTrainable"
def setup(self, config):
self.state = {"hi": 1}
def step(self):
return {"timesteps_this_iter": 1, "done": True}
def save_checkpoint(self, checkpoint_dir):
checkpoint_path = os.path.join(
checkpoint_dir, "checkpoint-{}".format(self._iteration)
)
with open(checkpoint_path, "wb") as f:
pickle.dump(self.state, f)
return checkpoint_path
def load_checkpoint(self, checkpoint_path):
with open(checkpoint_path, "rb") as f:
extra_data = pickle.load(f)
self.state.update(extra_data)
def setUp(self):
self.absolute_local_dir = None
ray.init(num_cpus=1, num_gpus=0, local_mode=self.local_mode)
def tearDown(self):
if self.absolute_local_dir is not None:
shutil.rmtree(self.absolute_local_dir, ignore_errors=True)
self.absolute_local_dir = None
ray.shutdown()
# Without this line, test_tune_server.testAddTrial would fail.
_register_all()
def _get_trial_dir(self, absoulte_exp_dir):
print("looking for", self.MockTrainable._name)
print("in", os.listdir(absoulte_exp_dir))
trial_dirname = next(
(
child_dir
for child_dir in os.listdir(absoulte_exp_dir)
if (
os.path.isdir(os.path.join(absoulte_exp_dir, child_dir))
and child_dir.startswith(self.MockTrainable._name)
)
)
)
trial_absolute_dir = os.path.join(absoulte_exp_dir, trial_dirname)
return trial_dirname, trial_absolute_dir
def _train(self, exp_name, local_dir, absolute_local_dir):
(trial,) = tune.run(
self.MockTrainable,
name=exp_name,
stop={"training_iteration": 1},
checkpoint_freq=1,
local_dir=local_dir,
config={"env": "CartPole-v0", "log_level": "DEBUG"},
).trials
exp_dir = os.path.join(absolute_local_dir, exp_name)
_, abs_trial_dir = self._get_trial_dir(exp_dir)
self.assertIsNone(trial.error_file)
self.assertEqual(trial.local_dir, exp_dir)
self.assertEqual(trial.logdir, abs_trial_dir)
self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir)
self.assertTrue(os.path.isdir(exp_dir))
self.assertTrue(os.path.isdir(abs_trial_dir))
self.assertTrue(
os.path.isfile(
os.path.join(abs_trial_dir, "checkpoint_000001/checkpoint-1")
)
)
def _restore(self, exp_name, local_dir, absolute_local_dir):
trial_name, abs_trial_dir = self._get_trial_dir(
os.path.join(absolute_local_dir, exp_name)
)
checkpoint_path = os.path.join(
local_dir, exp_name, trial_name, "checkpoint_000001/checkpoint-1"
) # Relative checkpoint path
        # The file tune would find: the absolute checkpoint path.
tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path))
self.assertTrue(
            os.path.isfile(tune_find_file), "{} does not exist!".format(tune_find_file)
)
(trial,) = tune.run(
self.MockTrainable,
name=exp_name,
stop={"training_iteration": 2}, # train one more iteration.
restore=checkpoint_path, # Restore the checkpoint
config={"env": "CartPole-v0", "log_level": "DEBUG"},
).trials
self.assertIsNone(trial.error_file)
def testDottedRelativePath(self):
local_dir = "./test_dotted_relative_local_dir"
exp_name = self.prefix + "DottedRelativeLocalDir"
absolute_local_dir = os.path.abspath(local_dir)
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testRelativePath(self):
local_dir = "test_relative_local_dir"
exp_name = self.prefix + "RelativePath"
absolute_local_dir = os.path.abspath(local_dir)
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testTildeAbsolutePath(self):
local_dir = "~/test_tilde_absolute_local_dir"
exp_name = self.prefix + "TildeAbsolutePath"
absolute_local_dir = os.path.abspath(os.path.expanduser(local_dir))
self.absolute_local_dir = absolute_local_dir
self.assertFalse(os.path.exists(absolute_local_dir))
self._train(exp_name, local_dir, absolute_local_dir)
self._restore(exp_name, local_dir, absolute_local_dir)
def testTempfile(self):
local_dir = tempfile.mkdtemp()
exp_name = self.prefix + "Tempfile"
self.absolute_local_dir = local_dir
self._train(exp_name, local_dir, local_dir)
self._restore(exp_name, local_dir, local_dir)
def testCheckpointWithNoop(self):
"""Tests that passing the checkpoint_dir right back works."""
class MockTrainable(Trainable):
def setup(self, config):
pass
def step(self):
return {"score": 1}
def save_checkpoint(self, checkpoint_dir):
with open(os.path.join(checkpoint_dir, "test.txt"), "wb") as f:
pickle.dump("test", f)
return checkpoint_dir
def load_checkpoint(self, checkpoint_dir):
with open(os.path.join(checkpoint_dir, "test.txt"), "rb") as f:
x = pickle.load(f)
assert x == "test"
return checkpoint_dir
validate_save_restore(MockTrainable)
validate_save_restore(MockTrainable, use_object_store=True)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
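# Editor's note (interpretation of the tests above, not Ray documentation): each
# test relies on the directory layout produced by tune.run with checkpoint_freq=1,
# roughly <local_dir>/<exp_name>/MockTrainable_<trial-id>/checkpoint_000001/checkpoint-1,
# which _train asserts into existence and _restore then feeds back via `restore=`
# to continue training for one more iteration.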
| 35.360215
| 85
| 0.635092
| 794
| 6,577
| 4.952141
| 0.200252
| 0.113937
| 0.122075
| 0.06409
| 0.495168
| 0.428789
| 0.338759
| 0.29705
| 0.247457
| 0.210071
| 0
| 0.005209
| 0.270336
| 6,577
| 185
| 86
| 35.551351
| 0.814128
| 0.039684
| 0
| 0.287671
| 0
| 0
| 0.0682
| 0.026646
| 0
| 0
| 0
| 0
| 0.089041
| 1
| 0.123288
| false
| 0.006849
| 0.082192
| 0.013699
| 0.280822
| 0.013699
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76f83e45ce6ee12f802b7d17751ac89ea6359f61
| 21,788
|
py
|
Python
|
tests/gpuarray/test_basic_ops.py
|
canyon289/Theano-PyMC
|
1a9b04bfe480b758ddfa54ba49c88bee3bec419c
|
[
"BSD-3-Clause"
] | 1
|
2020-12-30T19:12:52.000Z
|
2020-12-30T19:12:52.000Z
|
tests/gpuarray/test_basic_ops.py
|
canyon289/Theano-PyMC
|
1a9b04bfe480b758ddfa54ba49c88bee3bec419c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/gpuarray/test_basic_ops.py
|
canyon289/Theano-PyMC
|
1a9b04bfe480b758ddfa54ba49c88bee3bec419c
|
[
"BSD-3-Clause"
] | 1
|
2020-08-15T17:09:10.000Z
|
2020-08-15T17:09:10.000Z
|
import numpy as np
import pytest
import theano
import theano.tensor as tt
# Don't import test classes otherwise they get tested as part of the file
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor.test_basic import (
TestAlloc,
TestComparison,
TestJoinAndSplit,
TestReshape,
)
from tests.tensor.utils import rand, safe_make_node
from theano.gpuarray.basic_ops import (
GpuAlloc,
GpuAllocEmpty,
GpuContiguous,
GpuEye,
GpuFromHost,
GpuJoin,
GpuReshape,
GpuSplit,
GpuToGpu,
GpuTri,
HostFromGpu,
gpu_contiguous,
gpu_join,
host_from_gpu,
)
from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise
from theano.gpuarray.subtensor import GpuSubtensor
from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor
from theano.tensor import TensorType
from theano.tensor.basic import alloc
pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray
utt.seed_rng()
rng = np.random.RandomState(seed=utt.fetch_seed())
def inplace_func(
inputs,
outputs,
mode=None,
allow_input_downcast=False,
on_unused_input="raise",
name=None,
):
if mode is None:
mode = mode_with_gpu
return theano.function(
inputs,
outputs,
mode=mode,
allow_input_downcast=allow_input_downcast,
accept_inplace=True,
on_unused_input=on_unused_input,
name=name,
)
def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs):
from theano.tensor.sharedvar import scalar_constructor, tensor_constructor
for c in (gpuarray_shared_constructor, tensor_constructor, scalar_constructor):
try:
return c(
value, name=name, strict=strict, allow_downcast=allow_downcast, **kwargs
)
except TypeError:
continue
def rand_gpuarray(*shape, **kwargs):
r = rng.rand(*shape) * 2 - 1
dtype = kwargs.pop("dtype", theano.config.floatX)
cls = kwargs.pop("cls", None)
if len(kwargs) != 0:
raise TypeError("Unexpected argument %s", list(kwargs.keys())[0])
return gpuarray.array(r, dtype=dtype, cls=cls, context=get_context(test_ctx_name))
def makeTester(
name,
op,
gpu_op,
cases,
checks=None,
mode_gpu=mode_with_gpu,
mode_nogpu=mode_without_gpu,
skip=False,
eps=1e-10,
):
if checks is None:
checks = {}
_op = op
_gpu_op = gpu_op
_cases = cases
_skip = skip
_checks = checks
class Checker(utt.OptimizationTestMixin):
op = staticmethod(_op)
gpu_op = staticmethod(_gpu_op)
cases = _cases
skip = _skip
checks = _checks
def setup_method(self):
eval(self.__class__.__module__ + "." + self.__class__.__name__)
def test_all(self):
if skip:
pytest.skip(skip)
for testname, inputs in cases.items():
for _ in range(len(inputs)):
if type(inputs[_]) is float:
inputs[_] = np.asarray(inputs[_], dtype=theano.config.floatX)
self.run_case(testname, inputs)
def run_case(self, testname, inputs):
inputs_ref = [theano.shared(inp) for inp in inputs]
inputs_tst = [theano.shared(inp) for inp in inputs]
try:
node_ref = safe_make_node(self.op, *inputs_ref)
node_tst = safe_make_node(self.op, *inputs_tst)
except Exception as exc:
err_msg = (
"Test %s::%s: Error occurred while making " "a node with inputs %s"
) % (self.gpu_op, testname, inputs)
exc.args += (err_msg,)
raise
try:
f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu)
f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu)
except Exception as exc:
err_msg = (
"Test %s::%s: Error occurred while trying to " "make a Function"
) % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
self.assertFunctionContains1(f_tst, self.gpu_op)
ref_e = None
try:
expecteds = f_ref()
except Exception as exc:
ref_e = exc
try:
variables = f_tst()
except Exception as exc:
if ref_e is None:
err_msg = (
"Test %s::%s: exception when calling the " "Function"
) % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
else:
# if we raised an exception of the same type we're good.
if isinstance(exc, type(ref_e)):
return
else:
err_msg = (
"Test %s::%s: exception raised during test "
"call was not the same as the reference "
"call (got: %s, expected %s)"
% (self.gpu_op, testname, type(exc), type(ref_e))
)
exc.args += (err_msg,)
raise
for i, (variable, expected) in enumerate(zip(variables, expecteds)):
condition = (
variable.dtype != expected.dtype
or variable.shape != expected.shape
or not TensorType.values_eq_approx(variable, expected)
)
assert not condition, (
"Test %s::%s: Output %s gave the wrong "
"value. With inputs %s, expected %s "
"(dtype %s), got %s (dtype %s)."
% (
self.op,
testname,
i,
inputs,
expected,
expected.dtype,
variable,
variable.dtype,
)
)
for description, check in self.checks.items():
assert check(inputs, variables), (
"Test %s::%s: Failed check: %s " "(inputs were %s, ouputs were %s)"
) % (self.op, testname, description, inputs, variables)
Checker.__name__ = name
if hasattr(Checker, "__qualname__"):
Checker.__qualname__ = name
return Checker
def test_transfer_cpu_gpu():
a = tt.fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 4), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert np.all(fv == av)
def test_transfer_gpu_gpu():
g = GpuArrayType(
dtype="float32", broadcastable=(False, False), context_name=test_ctx_name
)()
av = np.asarray(rng.rand(5, 4), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
mode = mode_with_gpu.excluding(
"cut_gpua_host_transfers", "local_cut_gpua_host_gpua"
)
f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, GpuToGpu)
fv = f(gv)
assert GpuArrayType.values_eq(fv, gv)
def test_transfer_strided():
    # This is just to ensure that it works in Theano; libgpuarray has a much more
    # comprehensive suite of tests to ensure correctness.
a = tt.fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 8), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
av = av[:, ::2]
gv = gv[:, ::2]
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert np.all(fv == av)
def gpu_alloc_expected(x, *shp):
g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name))
g[:] = x
return g
TestGpuAlloc = makeTester(
name="GpuAllocTester",
# The +1 is there to allow the lift to the GPU.
op=lambda *args: alloc(*args) + 1,
gpu_op=GpuAlloc(test_ctx_name),
cases=dict(
correct01=(rand(), np.int32(7)),
# just gives a DeepCopyOp with possibly wrong results on the CPU
# correct01_bcast=(rand(1), np.int32(7)),
correct02=(rand(), np.int32(4), np.int32(7)),
correct12=(rand(7), np.int32(4), np.int32(7)),
correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)),
correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)),
bad_shape12=(rand(7), np.int32(7), np.int32(5)),
),
)
class TestGPUAlloc(TestAlloc):
dtype = "float32"
mode = mode_with_gpu
shared = staticmethod(gpuarray_shared_constructor)
allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()]
def test_alloc_empty():
for dt in ["float32", "int8"]:
f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3))
assert len(f.maker.fgraph.apply_nodes) == 1
out = f()
assert out.shape == (2, 3)
assert out.dtype == dt
f = theano.function(
[],
[
GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
],
)
out = f()
assert out[0].shape == (3, 2)
assert out[0].dtype == "uint64"
assert out[1].shape == (3, 2)
assert out[1].dtype == "uint64"
assert (
len(
[
node
for node in f.maker.fgraph.apply_nodes
if isinstance(node.op, GpuAllocEmpty)
]
)
== 1
)
def test_shape():
x = GpuArrayType(dtype="float32", broadcastable=[False, False, False])()
v = gpuarray.zeros((3, 4, 5), dtype="float32", context=get_context(test_ctx_name))
f = theano.function([x], x.shape)
topo = f.maker.fgraph.toposort()
assert np.all(f(v) == (3, 4, 5))
if theano.config.mode != "FAST_COMPILE":
assert len(topo) == 4
assert isinstance(topo[0].op, tt.opt.Shape_i)
assert isinstance(topo[1].op, tt.opt.Shape_i)
assert isinstance(topo[2].op, tt.opt.Shape_i)
assert isinstance(topo[3].op, tt.opt.MakeVector)
mode = mode_with_gpu.excluding("local_shape_to_shape_i")
f = theano.function([x], x.shape, mode=mode)
topo = f.maker.fgraph.toposort()
assert np.all(f(v) == (3, 4, 5))
assert len(topo) == 1
assert isinstance(topo[0].op, tt.Shape)
def test_gpu_contiguous():
a = tt.fmatrix("a")
i = tt.iscalar("i")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
# The reshape is needed; otherwise the subtensor would be done on the
# CPU to transfer less data.
f = theano.function(
[a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu
)
topo = f.maker.fgraph.toposort()
assert any([isinstance(node.op, GpuSubtensor) for node in topo])
assert any([isinstance(node.op, GpuContiguous) for node in topo])
assert f(a_val, 1).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
class TestGPUReshape(TestReshape):
def setup_method(self):
self.shared = gpuarray_shared_constructor
self.op = GpuReshape
self.mode = mode_with_gpu
self.ignore_topo = (
HostFromGpu,
GpuFromHost,
theano.compile.DeepCopyOp,
GpuDimShuffle,
GpuElemwise,
tt.opt.Shape_i,
tt.opt.MakeVector,
)
assert self.op == GpuReshape
class TestGPUComparison(TestComparison):
def setup_method(self):
utt.seed_rng()
self.mode = mode_with_gpu
self.shared = gpuarray_shared_constructor
self.dtypes = ["float64", "float32"]
class TestGPUJoinAndSplit(TestJoinAndSplit):
def setup_method(self):
self.mode = mode_with_gpu.excluding("constant_folding")
self.join_op = GpuJoin()
self.split_op_class = GpuSplit
# Use join instead of MakeVector since there is no MakeVector on GPU
self.make_vector_op = GpuJoin()
# this is to avoid errors with limited devices
self.floatX = "float32"
self.hide_error = theano.config.mode not in ["DebugMode", "DEBUG_MODE"]
def shared(x, **kwargs):
return gpuarray_shared_constructor(x, target=test_ctx_name, **kwargs)
self.shared = shared
def test_gpusplit_opt(self):
# Test that we move the node to the GPU
# Also test float16 computation at the same time.
rng = np.random.RandomState(seed=utt.fetch_seed())
m = self.shared(rng.rand(4, 6).astype("float16"))
o = tt.Split(2)(m, 0, [2, 2])
assert o[0].dtype == "float16"
f = theano.function([], o, mode=self.mode)
assert any(
[
isinstance(node.op, self.split_op_class)
for node in f.maker.fgraph.toposort()
]
)
o1, o2 = f()
assert np.allclose(o1, m.get_value(borrow=True)[:2])
assert np.allclose(o2, m.get_value(borrow=True)[2:])
def test_gpujoin_gpualloc():
a = tt.fmatrix("a")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
b = tt.fmatrix("b")
b_val = np.asarray(np.random.rand(3, 5), dtype="float32")
f = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_without_gpu
)
f_gpu = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu
)
f_gpu2 = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu
)
assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2
assert sum([node.op == tt.join_ for node in f.maker.fgraph.toposort()]) == 1
assert (
sum([isinstance(node.op, GpuAlloc) for node in f_gpu.maker.fgraph.toposort()])
== 2
)
assert sum([node.op == gpu_join for node in f_gpu.maker.fgraph.toposort()]) == 1
assert (
sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()])
== 2
)
assert sum([node.op == gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1
assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
def test_gpueye():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
# Currently DebugMode does not support None as an input, even though
# it is otherwise allowed.
if M is None:
M = N
N_symb = tt.iscalar()
M_symb = tt.iscalar()
k_symb = tt.iscalar()
out = tt.eye(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
assert np.allclose(result, np.eye(N, M_, k, dtype=dtype))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()])
for dtype in ["float32", "int32", "float16"]:
check(dtype, 3)
# M != N, k = 0
check(dtype, 3, 5)
check(dtype, 5, 3)
# N == M, k != 0
check(dtype, 3, 3, 1)
check(dtype, 3, 3, -1)
# N < M, k != 0
check(dtype, 3, 5, 1)
check(dtype, 3, 5, -1)
# N > M, k != 0
check(dtype, 5, 3, 1)
check(dtype, 5, 3, -1)
# |k| >= M or |k| >= N (diagonal offset at or beyond the matrix edge)
check(dtype, 5, 3, 3)
check(dtype, 3, 5, 3)
check(dtype, 5, 3, -3)
check(dtype, 3, 5, -3)
check(dtype, 5, 3, 6)
check(dtype, 3, 5, -6)
def test_hostfromgpu_shape_i():
# Test that the shape is lifted over hostfromgpu
m = mode_with_gpu.including(
"local_dot_to_dot22", "local_dot22_to_dot22scalar", "specialize"
)
a = tt.fmatrix("a")
ca = theano.gpuarray.type.GpuArrayType("float32", (False, False))()
av = np.asarray(np.random.rand(5, 4), dtype="float32")
cv = gpuarray.asarray(
np.random.rand(5, 4), dtype="float32", context=get_context(test_ctx_name)
)
f = theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m)
assert any(isinstance(x.op, GpuFromHost) for x in f.maker.fgraph.toposort())
f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, tt.opt.Shape_i)
assert isinstance(topo[1].op, tt.opt.Shape_i)
assert isinstance(topo[2].op, tt.opt.MakeVector)
assert tuple(f(av)) == (5, 4)
f = theano.function([ca], host_from_gpu(ca), mode=m)
assert host_from_gpu in [x.op for x in f.maker.fgraph.toposort()]
f = theano.function([ca], host_from_gpu(ca).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, theano.compile.Shape_i)
assert isinstance(topo[1].op, theano.compile.Shape_i)
assert isinstance(topo[2].op, tt.opt.MakeVector)
assert tuple(f(cv)) == (5, 4)
def test_Gpujoin_inplace():
# Test that GpuJoin works inplace.
#
# This covers the case where several inputs are passed to GpuJoin but
# all except one of them are empty. In that case GpuJoin should work
# inplace and the output should be a view of the non-empty input.
s = tt.lscalar()
data = np.array([3, 4, 5], dtype=theano.config.floatX)
x = gpuarray_shared_constructor(data, borrow=True)
z = tt.zeros((s,))
join = GpuJoin(view=0)
c = join(0, x, z)
f = theano.function([s], theano.Out(c, borrow=True))
if not isinstance(mode_with_gpu, theano.compile.DebugMode):
assert x.get_value(borrow=True, return_internal_type=True) is f(0)
assert np.allclose(f(0), [3, 4, 5])
def test_gpu_tril_triu():
def check_l(m, k=0):
m_symb = tt.matrix(dtype=m.dtype)
k_symb = tt.iscalar()
f = theano.function(
[m_symb, k_symb], tt.tril(m_symb, k_symb), mode=mode_with_gpu
)
result = f(m, k)
assert np.allclose(result, np.tril(m, k))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
def check_u(m, k=0):
m_symb = tt.matrix(dtype=m.dtype)
k_symb = tt.iscalar()
f = theano.function(
[m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu
)
result = f(m, k)
assert np.allclose(result, np.triu(m, k))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
utt.seed_rng()
test_rng = np.random.RandomState(seed=utt.fetch_seed())
for dtype in ["float64", "float32", "float16"]:
# try a big one
m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
m = np.asarray(test_rng.rand(10, 10) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
m = np.asarray(test_rng.rand(10, 5) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
def test_gputri():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
# Currently DebugMode does not support None as an input, even though
# it is otherwise allowed.
if M is None:
M = N
N_symb = tt.iscalar()
M_symb = tt.iscalar()
k_symb = tt.iscalar()
out = tt.tri(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
assert np.allclose(result, np.tri(N, M_, k, dtype=dtype))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
for dtype in ["float64", "float32", "int32", "float16"]:
# try a big one
check(dtype, 1000, 1000, 0)
check(dtype, 1000, 1000, -400)
check(dtype, 1000, 1000, 400)
check(dtype, 5)
# M != N, k = 0
check(dtype, 3, 5)
check(dtype, 5, 3)
# N == M, k != 0
check(dtype, 3, 3, 1)
check(dtype, 3, 3, -1)
# N < M, k != 0
check(dtype, 3, 5, 1)
check(dtype, 3, 5, -1)
# N > M, k != 0
check(dtype, 5, 3, 1)
check(dtype, 5, 3, -1)
# |k| >= M or |k| >= N (diagonal offset at or beyond the matrix edge)
check(dtype, 5, 3, 3)
check(dtype, 3, 5, 3)
check(dtype, 5, 3, -3)
check(dtype, 3, 5, -3)
check(dtype, 5, 3, 6)
check(dtype, 3, 5, -6)
| 32.962179
| 88
| 0.575867
| 2,989
| 21,788
| 4.056541
| 0.126798
| 0.028866
| 0.019052
| 0.024742
| 0.488495
| 0.445361
| 0.40066
| 0.378227
| 0.339381
| 0.331299
| 0
| 0.027783
| 0.296264
| 21,788
| 660
| 89
| 33.012121
| 0.762995
| 0.067927
| 0
| 0.358779
| 0
| 0
| 0.04584
| 0.004688
| 0
| 0
| 0
| 0
| 0.129771
| 1
| 0.055344
| false
| 0
| 0.030534
| 0.001908
| 0.125954
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76fb80b4170accbe860db8c0999717d64544977e
| 5,741
|
py
|
Python
|
ament_tools/setup_arguments.py
|
richmattes/ament_tools
|
2a25cdcc273fcd73e81e8a47fe892a0b5963307d
|
[
"Apache-2.0"
] | 1
|
2020-05-19T14:33:49.000Z
|
2020-05-19T14:33:49.000Z
|
ros2_mod_ws/install/lib/python3.7/site-packages/ament_tools/setup_arguments.py
|
mintforpeople/robobo-ros2-ios-port
|
1a5650304bd41060925ebba41d6c861d5062bfae
|
[
"Apache-2.0"
] | null | null | null |
ros2_mod_ws/install/lib/python3.7/site-packages/ament_tools/setup_arguments.py
|
mintforpeople/robobo-ros2-ios-port
|
1a5650304bd41060925ebba41d6c861d5062bfae
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import distutils.core
import os
try:
import setuptools
except ImportError:
pass
import subprocess
import sys
from threading import Lock
from ament_tools.build_type import get_command_prefix
from ament_tools.helper import quote_shell_command
setup_lock = None
def get_setup_arguments_with_context(build_type, context):
"""
Capture the arguments of the setup() function in the setup.py file.
To provide a custom environment when introspecting the setup() function,
a separate Python interpreter is used, which can have an extended
PYTHONPATH etc.
:param build_type: the build type
:param context: the context
:type context: :py:class:`ament_tools.context.Context`
:returns: a dictionary containing the arguments of the setup() function
"""
prefix = get_command_prefix(
'%s__setup' % build_type, context.build_space,
context.build_dependencies)
ament_tools_path = os.path.dirname(os.path.dirname(__file__))
setuppy = os.path.join(context.source_space, 'setup.py')
if os.name == 'nt':
ament_tools_path = ament_tools_path.replace(os.sep, os.altsep)
setuppy = setuppy.replace(os.sep, os.altsep)
code_lines = [
'import sys',
"sys.path.insert(0, '%s')" % ament_tools_path,
'from ament_tools.setup_arguments import get_setup_arguments',
"print(repr(get_setup_arguments('%s')))" % setuppy]
# invoke get_setup_arguments() in a separate interpreter
cmd = prefix + [sys.executable, '-c', ';'.join(code_lines)]
cmd = quote_shell_command(cmd)
result = subprocess.run(
cmd, stdout=subprocess.PIPE, shell=True, check=True)
output = result.stdout.decode()
return ast.literal_eval(output)
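# Hedged usage sketch (the values below are illustrative, not part of
# ament_tools): the function shells out to a separate interpreter so the
# build environment's PYTHONPATH is honoured, then literal-evals the
# printed dict.
#
#   args = get_setup_arguments_with_context('ament_python', context)
#   print(args['name'], args.get('install_requires', []))
#
# 'context' is assumed to be an ament_tools Context whose build_space,
# build_dependencies and source_space were set up by the caller.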
def get_setup_arguments(setup_py_path):
"""
Capture the arguments of the setup() function in the setup.py file.
The function is run within the current Python interpreter.
Therefore the processed setup.py file cannot have any additional
dependencies that are not available in the current environment.
:param setup_py_path: the path to the setup.py file
:returns: a dictionary containing the arguments of the setup() function
"""
global setup_lock
if not setup_lock:
setup_lock = Lock()
assert os.path.basename(setup_py_path) == 'setup.py'
# prevent side effects in other threads
with setup_lock:
# change to the directory containing the setup.py file
old_cwd = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(setup_py_path)))
try:
data = {}
mock_setup = create_mock_setup_function(data)
# replace setup() function of distutils and setuptools
# in order to capture its arguments
try:
distutils_setup = distutils.core.setup
distutils.core.setup = mock_setup
try:
setuptools_setup = setuptools.setup
setuptools.setup = mock_setup
except NameError:
pass
# evaluate the setup.py file
with open('setup.py', 'r') as h:
exec(h.read())
finally:
distutils.core.setup = distutils_setup
try:
setuptools.setup = setuptools_setup
except NameError:
pass
return data
finally:
os.chdir(old_cwd)
def create_mock_setup_function(data):
"""
Create a mock function to capture its arguments.
It can replace either distutils.core.setup or setuptools.setup.
:param data: a dictionary which is updated with the captured arguments
:returns: a function to replace distutils.core.setup and setuptools.setup
"""
def setup(*args, **kwargs):
if args:
raise RuntimeError(
'setup() function invoked with positional arguments')
if 'name' not in kwargs:
raise RuntimeError(
"setup() function invoked without the keyword argument 'name'")
data.update(kwargs)
return setup
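# Minimal sketch of how the mock captures arguments (hypothetical values,
# not part of ament_tools itself):
#
#   captured = {}
#   mock = create_mock_setup_function(captured)
#   mock(name='my_pkg', version='0.1.0', packages=['my_pkg'])
#   assert captured['name'] == 'my_pkg'
#
# Positional arguments or a missing 'name' keyword raise RuntimeError,
# mirroring the checks in setup() above.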
def get_data_files_mapping(data_files):
"""
Transform the data_files structure into a dictionary.
:param data_files: either a list of source files or
a list of tuples where the first element is the destination path and
the second element is a list of source files
:returns: a dictionary mapping the source file to a destination file
"""
mapping = {}
for data_file in data_files:
if isinstance(data_file, tuple):
assert len(data_file) == 2
dest = data_file[0]
assert not os.path.isabs(dest)
sources = data_file[1]
assert isinstance(sources, list)
for source in sources:
assert not os.path.isabs(source)
mapping[source] = os.path.join(dest, os.path.basename(source))
else:
assert not os.path.isabs(data_file)
mapping[data_file] = os.path.basename(data_file)
return mapping
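# Worked example (hypothetical file names): a mixed data_files structure
# is flattened into a source -> destination mapping.
#
#   get_data_files_mapping(['package.xml', ('share/doc', ['doc/readme.md'])])
#   == {'package.xml': 'package.xml',
#       'doc/readme.md': 'share/doc/readme.md'}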
| 35.006098
| 79
| 0.656854
| 743
| 5,741
| 4.951548
| 0.293405
| 0.024735
| 0.01794
| 0.019027
| 0.163088
| 0.081
| 0.060342
| 0.060342
| 0.060342
| 0.060342
| 0
| 0.002858
| 0.268594
| 5,741
| 163
| 80
| 35.220859
| 0.873303
| 0.38077
| 0
| 0.157303
| 0
| 0
| 0.083529
| 0.019118
| 0
| 0
| 0
| 0
| 0.067416
| 1
| 0.05618
| false
| 0.033708
| 0.134831
| 0
| 0.235955
| 0.011236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76fc510648fa61f30ccc12c1c9b02c19d255e9c6
| 870
|
py
|
Python
|
tests/functional/test_soft_round_inverse.py
|
tallamjr/NeuralCompression
|
21d05ec0d9f8c52d8742fde36f569b4dad2842a5
|
[
"MIT"
] | 233
|
2021-07-19T18:50:21.000Z
|
2022-03-30T22:06:40.000Z
|
tests/functional/test_soft_round_inverse.py
|
tallamjr/NeuralCompression
|
21d05ec0d9f8c52d8742fde36f569b4dad2842a5
|
[
"MIT"
] | 79
|
2021-07-22T13:33:45.000Z
|
2022-02-09T16:38:42.000Z
|
tests/functional/test_soft_round_inverse.py
|
tallamjr/NeuralCompression
|
21d05ec0d9f8c52d8742fde36f569b4dad2842a5
|
[
"MIT"
] | 21
|
2021-07-29T18:27:59.000Z
|
2022-02-28T02:32:53.000Z
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from neuralcompression.functional import soft_round, soft_round_inverse
def test_soft_round_inverse():
x = torch.linspace(-2.0, 2.0, 50)
torch.testing.assert_close(
x,
soft_round_inverse(x, alpha=1e-13),
)
x = torch.tensor([-1.25, -0.75, 0.75, 1.25])
torch.testing.assert_close(
x,
soft_round_inverse(soft_round(x, alpha=2.0), alpha=2.0),
)
for offset in range(-5, 5):
x = torch.linspace(offset + 0.001, offset + 0.999, 100)
torch.testing.assert_close(
torch.ceil(x) - 0.5,
soft_round_inverse(x, alpha=5000.0),
atol=0.001,
rtol=0.002,
)
| 24.857143
| 71
| 0.61954
| 131
| 870
| 3.992366
| 0.458015
| 0.120459
| 0.152964
| 0.097514
| 0.206501
| 0.152964
| 0.152964
| 0.152964
| 0
| 0
| 0
| 0.082555
| 0.262069
| 870
| 34
| 72
| 25.588235
| 0.732087
| 0.195402
| 0
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76fce814c3b3e855b82681563736510cd9476acb
| 1,738
|
py
|
Python
|
dizoo/pybullet/config/hopper_ppo_default_config.py
|
konnase/DI-engine
|
f803499cad191e9277b10e194132d74757bcfc8e
|
[
"Apache-2.0"
] | 2
|
2021-07-30T15:55:45.000Z
|
2021-07-30T16:35:10.000Z
|
dizoo/pybullet/config/hopper_ppo_default_config.py
|
konnase/DI-engine
|
f803499cad191e9277b10e194132d74757bcfc8e
|
[
"Apache-2.0"
] | null | null | null |
dizoo/pybullet/config/hopper_ppo_default_config.py
|
konnase/DI-engine
|
f803499cad191e9277b10e194132d74757bcfc8e
|
[
"Apache-2.0"
] | null | null | null |
from easydict import EasyDict
hopper_ppo_default_config = dict(
env=dict(
env_id='HopperMuJoCoEnv-v0',
norm_obs=dict(use_norm=False, ),
norm_reward=dict(use_norm=False, ),
collector_env_num=8,
evaluator_env_num=10,
use_act_scale=True,
n_evaluator_episode=10,
stop_value=3000,
),
policy=dict(
cuda=True,
on_policy=True,
recompute_adv=True,
model=dict(
obs_shape=11,
action_shape=3,
continuous=True,
),
continuous=True,
learn=dict(
epoch_per_collect=10,
batch_size=64,
learning_rate=3e-4,
value_weight=0.5,
entropy_weight=0.0,
clip_ratio=0.2,
adv_norm=True,
value_norm=True,
),
collect=dict(
n_sample=2048,
unroll_len=1,
discount_factor=0.99,
gae_lambda=0.97,
),
eval=dict(evaluator=dict(eval_freq=5000, )),
other=dict(replay_buffer=dict(
replay_buffer_size=10000,
replay_buffer_start_size=0,
), ),
),
)
hopper_ppo_default_config = EasyDict(hopper_ppo_default_config)
main_config = hopper_ppo_default_config
hopper_ppo_create_default_config = dict(
env=dict(
type='pybullet',
import_names=['dizoo.pybullet.envs.pybullet_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(
type='ppo',
import_names=['ding.policy.ppo'],
),
replay_buffer=dict(type='naive', ),
)
hopper_ppo_create_default_config = EasyDict(hopper_ppo_create_default_config)
create_config = hopper_ppo_create_default_config
| 27.15625
| 77
| 0.596663
| 205
| 1,738
| 4.702439
| 0.434146
| 0.074689
| 0.06639
| 0.091286
| 0.235477
| 0.070539
| 0
| 0
| 0
| 0
| 0
| 0.037985
| 0.303222
| 1,738
| 63
| 78
| 27.587302
| 0.758051
| 0
| 0
| 0.213115
| 0
| 0
| 0.052359
| 0.018412
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04918
| 0
| 0.04918
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76fcfe8188f93389658caff72e97003d25b756ad
| 1,519
|
py
|
Python
|
cisco_sdwan_policy/List/Application.py
|
ljm625/cisco_sdwan_policy_python
|
1dd1361a7c4e8ee36df6176f54583081b4ad800a
|
[
"MIT"
] | 11
|
2019-11-07T02:22:34.000Z
|
2022-03-04T17:47:02.000Z
|
cisco_sdwan_policy/List/Application.py
|
ljm625/cisco_sdwan_policy_python
|
1dd1361a7c4e8ee36df6176f54583081b4ad800a
|
[
"MIT"
] | null | null | null |
cisco_sdwan_policy/List/Application.py
|
ljm625/cisco_sdwan_policy_python
|
1dd1361a7c4e8ee36df6176f54583081b4ad800a
|
[
"MIT"
] | 6
|
2019-11-07T02:22:41.000Z
|
2020-07-30T01:58:51.000Z
|
import json
from cisco_sdwan_policy.BaseObject import BaseObject
class Application(BaseObject):
def __init__(self,name,app_list,is_app_family,id=None,reference=None,**kwargs):
self.type = "appList"
self.id = id
self.name = name
self.references = reference
self.app_family=is_app_family
self._entries = app_list
self.url = "template/policy/list/app"
super().__init__(**kwargs)
self.modified=False
def get_entries(self):
return self._entries
def set_entries(self,entries):
self.modified=True
self._entries=entries
@classmethod
def from_json(cls,jsonfile,**kwargs):
id = jsonfile["listId"]
name = jsonfile["name"]
references = jsonfile.get("references")
if len(jsonfile["entries"])>0 and jsonfile["entries"][0].get("app"):
appFamily=False
entries = [i["app"] for i in jsonfile["entries"]]
else:
if not jsonfile["entries"][0].get("appFamily"):
return None
else:
appFamily=True
entries = [i["appFamily"] for i in jsonfile["entries"]]
return cls(name,entries,appFamily,id,references,**kwargs)
def to_json(self):
return {
"name":self.name,
"description":"Desc Not Required",
"type":"app",
"entries":[
{"appFamily" if self.app_family else "app":i} for i in self._entries]
}
| 29.784314
| 85
| 0.579987
| 173
| 1,519
| 4.942197
| 0.300578
| 0.064327
| 0.05614
| 0.044444
| 0.049123
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002814
| 0.298223
| 1,519
| 50
| 86
| 30.38
| 0.79925
| 0
| 0
| 0.04878
| 0
| 0
| 0.110599
| 0.0158
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121951
| false
| 0
| 0.04878
| 0.04878
| 0.292683
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76fe32cf212234521487302570fb1379460db739
| 1,575
|
py
|
Python
|
supervisor/docker/dns.py
|
zeehio/supervisor
|
b2f2806465001b4f6500601fa4c6516a404d53b8
|
[
"Apache-2.0"
] | null | null | null |
supervisor/docker/dns.py
|
zeehio/supervisor
|
b2f2806465001b4f6500601fa4c6516a404d53b8
|
[
"Apache-2.0"
] | null | null | null |
supervisor/docker/dns.py
|
zeehio/supervisor
|
b2f2806465001b4f6500601fa4c6516a404d53b8
|
[
"Apache-2.0"
] | null | null | null |
"""DNS docker object."""
import logging
from ..const import ENV_TIME
from ..coresys import CoreSysAttributes
from .interface import DockerInterface
_LOGGER: logging.Logger = logging.getLogger(__name__)
DNS_DOCKER_NAME: str = "hassio_dns"
class DockerDNS(DockerInterface, CoreSysAttributes):
"""Docker Supervisor wrapper for Supervisor DNS."""
@property
def image(self) -> str:
"""Return name of Supervisor DNS image."""
return self.sys_plugins.dns.image
@property
def name(self) -> str:
"""Return name of Docker container."""
return DNS_DOCKER_NAME
def _run(self) -> None:
"""Run Docker image.
Needs to run inside the executor.
"""
if self._is_running():
return
# Cleanup
self._stop()
# Create & Run container
docker_container = self.sys_docker.run(
self.image,
tag=self.sys_plugins.dns.version.string,
init=False,
dns=False,
ipv4=self.sys_docker.network.dns,
name=self.name,
hostname=self.name.replace("_", "-"),
detach=True,
environment={ENV_TIME: self.sys_config.timezone},
volumes={
str(self.sys_config.path_extern_dns): {"bind": "/config", "mode": "rw"}
},
)
self._meta = docker_container.attrs
_LOGGER.info(
"Starting DNS %s with version %s - %s",
self.image,
self.version,
self.sys_docker.network.dns,
)
| 26.25
| 87
| 0.576508
| 169
| 1,575
| 5.195266
| 0.408284
| 0.055809
| 0.044419
| 0.038724
| 0.095672
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000923
| 0.312381
| 1,575
| 59
| 88
| 26.694915
| 0.809788
| 0.133968
| 0
| 0.105263
| 0
| 0
| 0.049242
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0
| 0.105263
| 0
| 0.289474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76fe3680ef2ec070b0bf345c1f776ebc38adabdb
| 2,927
|
py
|
Python
|
nuitka/codegen/OperatorCodes.py
|
hclivess/Nuitka
|
9c7ec9696e69a3901b25d5bce720c921d45c931b
|
[
"Apache-2.0"
] | null | null | null |
nuitka/codegen/OperatorCodes.py
|
hclivess/Nuitka
|
9c7ec9696e69a3901b25d5bce720c921d45c931b
|
[
"Apache-2.0"
] | 1
|
2019-03-01T11:33:40.000Z
|
2019-03-01T11:33:40.000Z
|
nuitka/codegen/OperatorCodes.py
|
hclivess/Nuitka
|
9c7ec9696e69a3901b25d5bce720c921d45c931b
|
[
"Apache-2.0"
] | 1
|
2019-03-26T16:56:21.000Z
|
2019-03-26T16:56:21.000Z
|
# Copyright 2019, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Operator code tables
These are mostly used to look up the Python C/API from operations or a wrapper used.
"""
from nuitka.PythonVersions import python_version
binary_operator_codes = {
# Those commented out in this section have fully specialized variants already.
# "Add" : "PyNumber_Add",
# "Sub" : "PyNumber_Subtract",
# "Div" : "PyNumber_Divide",
# "Mult" : "PyNumber_Multiply",
# "Mod" : "PyNumber_Remainder",
# "Div" : "PyNumber_Divide",
# "FloorDiv" : "PyNumber_FloorDivide",
# "TrueDiv" : "PyNumber_TrueDivide",
# These have their own variants only to make sure the generic code is in-lined
# but the CPython code is not in-lined.
# "Pow" : "PyNumber_Power",
# "IPow" : "PyNumber_InPlacePower",
# The others are generic code and would be faster if they had a specialized variant too.
"LShift": "PyNumber_Lshift",
"RShift": "PyNumber_Rshift",
"BitAnd": "PyNumber_And",
"BitOr": "PyNumber_Or",
"BitXor": "PyNumber_Xor",
"IAdd": "PyNumber_InPlaceAdd",
"ISub": "PyNumber_InPlaceSubtract",
"IMult": "PyNumber_InPlaceMultiply",
"IDiv": "PyNumber_InPlaceDivide",
"IFloorDiv": "PyNumber_InPlaceFloorDivide",
"ITrueDiv": "PyNumber_InPlaceTrueDivide",
"IMod": "PyNumber_InPlaceRemainder",
"ILShift": "PyNumber_InPlaceLshift",
"IRShift": "PyNumber_InPlaceRshift",
"IBitAnd": "PyNumber_InPlaceAnd",
"IBitOr": "PyNumber_InPlaceOr",
"IBitXor": "PyNumber_InPlaceXor",
}
# Matrix multiplication operators are only available since Python 3.5.
if python_version >= 350:
binary_operator_codes["MatMult"] = "PyNumber_MatrixMultiply"
binary_operator_codes["IMatMult"] = "PyNumber_InPlaceMatrixMultiply"
unary_operator_codes = {
"UAdd": ("PyNumber_Positive", 1),
"USub": ("PyNumber_Negative", 1),
"Invert": ("PyNumber_Invert", 1),
"Repr": ("PyObject_Repr", 1),
"Not": ("UNARY_NOT", 0),
}
rich_comparison_codes = {
"Lt": "LT",
"LtE": "LE",
"Eq": "EQ",
"NotEq": "NE",
"Gt": "GT",
"GtE": "GE",
}
containing_comparison_codes = ("In", "NotIn")
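# Illustrative lookups (a sketch of how a code generator might consume these
# tables; the variable names below are not from Nuitka itself):
#
#   api_name = binary_operator_codes["IAdd"]      # "PyNumber_InPlaceAdd"
#   helper, flag = unary_operator_codes["Not"]    # ("UNARY_NOT", 0)
#   cmp_code = rich_comparison_codes["NotEq"]     # "NE"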
| 35.26506
| 92
| 0.65767
| 339
| 2,927
| 5.536873
| 0.587021
| 0.031966
| 0.030368
| 0.017048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007881
| 0.219679
| 2,927
| 82
| 93
| 35.695122
| 0.813923
| 0.524428
| 0
| 0
| 0
| 0
| 0.465237
| 0.181213
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025641
| 0
| 0.025641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76ffe18f535cd4c67dd1eed479466fb1bd48b6ea
| 6,130
|
py
|
Python
|
python_modules/dagster-graphql/dagster_graphql/implementation/external.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 1
|
2021-01-31T19:16:29.000Z
|
2021-01-31T19:16:29.000Z
|
python_modules/dagster-graphql/dagster_graphql/implementation/external.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-graphql/dagster_graphql/implementation/external.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | null | null | null |
import sys
from dagster import check
from dagster.config.validate import validate_config_from_snap
from dagster.core.host_representation import ExternalPipeline, PipelineSelector, RepositorySelector
from dagster.core.workspace.context import BaseWorkspaceRequestContext
from dagster.utils.error import serializable_error_info_from_exc_info
from graphql.execution.base import ResolveInfo
from .utils import UserFacingGraphQLError, capture_error
def get_full_external_pipeline_or_raise(graphene_info, selector):
from ..schema.errors import GraphenePipelineNotFoundError
check.inst_param(graphene_info, "graphene_info", ResolveInfo)
check.inst_param(selector, "selector", PipelineSelector)
if not graphene_info.context.has_external_pipeline(selector):
raise UserFacingGraphQLError(GraphenePipelineNotFoundError(selector=selector))
return graphene_info.context.get_full_external_pipeline(selector)
def get_external_pipeline_or_raise(graphene_info, selector):
from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError
from ..schema.pipelines.pipeline import GraphenePipeline
check.inst_param(graphene_info, "graphene_info", ResolveInfo)
check.inst_param(selector, "selector", PipelineSelector)
full_pipeline = get_full_external_pipeline_or_raise(graphene_info, selector)
if selector.solid_selection is None:
return full_pipeline
for solid_name in selector.solid_selection:
if not full_pipeline.has_solid_invocation(solid_name):
raise UserFacingGraphQLError(
GrapheneInvalidSubsetError(
message='Solid "{solid_name}" does not exist in "{pipeline_name}"'.format(
solid_name=solid_name, pipeline_name=selector.pipeline_name
),
pipeline=GraphenePipeline(full_pipeline),
)
)
return get_subset_external_pipeline(graphene_info.context, selector)
def get_subset_external_pipeline(context, selector):
from ..schema.pipelines.pipeline_errors import GrapheneInvalidSubsetError
from ..schema.pipelines.pipeline import GraphenePipeline
check.inst_param(selector, "selector", PipelineSelector)
repository_location = context.get_repository_location(selector.location_name)
try:
external_pipeline = repository_location.get_external_pipeline(selector)
except Exception:
error_info = serializable_error_info_from_exc_info(sys.exc_info())
raise UserFacingGraphQLError(
GrapheneInvalidSubsetError(
message="{message}{cause_message}".format(
message=error_info.message,
cause_message="\n{}".format(error_info.cause.message)
if error_info.cause
else "",
),
pipeline=GraphenePipeline(context.get_full_external_pipeline(selector)),
)
)
return external_pipeline
def ensure_valid_config(external_pipeline, mode, run_config):
from ..schema.pipelines.config import GrapheneRunConfigValidationInvalid
check.inst_param(external_pipeline, "external_pipeline", ExternalPipeline)
check.opt_str_param(mode, "mode")
# do not type check run_config so that validate_config_from_snap throws
validated_config = validate_config_from_snap(
config_schema_snapshot=external_pipeline.config_schema_snapshot,
config_type_key=external_pipeline.root_config_key_for_mode(mode),
config_value=run_config,
)
if not validated_config.success:
raise UserFacingGraphQLError(
GrapheneRunConfigValidationInvalid.for_validation_errors(
external_pipeline, validated_config.errors
)
)
return validated_config
def get_external_execution_plan_or_raise(
graphene_info,
external_pipeline,
mode,
run_config,
step_keys_to_execute,
known_state,
):
return graphene_info.context.get_external_execution_plan(
external_pipeline=external_pipeline,
run_config=run_config,
mode=mode,
step_keys_to_execute=step_keys_to_execute,
known_state=known_state,
)
@capture_error
def fetch_repositories(graphene_info):
from ..schema.external import GrapheneRepository, GrapheneRepositoryConnection
check.inst_param(graphene_info, "graphene_info", ResolveInfo)
return GrapheneRepositoryConnection(
nodes=[
GrapheneRepository(repository=repository, repository_location=location)
for location in graphene_info.context.repository_locations
for repository in location.get_repositories().values()
]
)
@capture_error
def fetch_repository(graphene_info, repository_selector):
from ..schema.errors import GrapheneRepositoryNotFoundError
from ..schema.external import GrapheneRepository
check.inst_param(graphene_info, "graphene_info", ResolveInfo)
check.inst_param(repository_selector, "repository_selector", RepositorySelector)
if graphene_info.context.has_repository_location(repository_selector.location_name):
repo_loc = graphene_info.context.get_repository_location(repository_selector.location_name)
if repo_loc.has_repository(repository_selector.repository_name):
return GrapheneRepository(
repository=repo_loc.get_repository(repository_selector.repository_name),
repository_location=repo_loc,
)
return GrapheneRepositoryNotFoundError(
repository_selector.location_name, repository_selector.repository_name
)
@capture_error
def fetch_workspace(workspace_request_context):
from ..schema.external import GrapheneWorkspace, GrapheneWorkspaceLocationEntry
check.inst_param(
workspace_request_context, "workspace_request_context", BaseWorkspaceRequestContext
)
nodes = [
GrapheneWorkspaceLocationEntry(entry)
for entry in workspace_request_context.get_workspace_snapshot().values()
]
return GrapheneWorkspace(locationEntries=nodes)
| 36.272189
| 99
| 0.746656
| 625
| 6,130
| 6.9792
| 0.1776
| 0.057772
| 0.032095
| 0.021091
| 0.307428
| 0.235442
| 0.161623
| 0.161623
| 0.15039
| 0.117148
| 0
| 0
| 0.191028
| 6,130
| 168
| 100
| 36.488095
| 0.879613
| 0.011256
| 0
| 0.185484
| 0
| 0
| 0.037135
| 0.008087
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.145161
| 0.008065
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
76fffdbfafd70ccdff333934ec210a4753dad75a
| 1,552
|
py
|
Python
|
tests/test_utils/test_pywriting_utils.py
|
heylohousing/quickbase-client
|
46e4eea3a5c7a2720560e5688eb4f0fbdb607206
|
[
"MIT"
] | null | null | null |
tests/test_utils/test_pywriting_utils.py
|
heylohousing/quickbase-client
|
46e4eea3a5c7a2720560e5688eb4f0fbdb607206
|
[
"MIT"
] | null | null | null |
tests/test_utils/test_pywriting_utils.py
|
heylohousing/quickbase-client
|
46e4eea3a5c7a2720560e5688eb4f0fbdb607206
|
[
"MIT"
] | null | null | null |
import os
from tempfile import TemporaryDirectory
from quickbase_client.utils.pywriting_utils import BasicPyFileWriter
from quickbase_client.utils.pywriting_utils import PyPackageWriter
class TestBasicFileWriter:
def test_outputs_lines(self):
w = BasicPyFileWriter()
w.add_line('import abc')
w.add_line('import os').space()
s = w.get_file_as_string()
assert s == 'import abc\nimport os\n'
def test_indent_dedent(self):
w = BasicPyFileWriter()
w.add_line('def foo():').indent().add_line('return 5').dedent().space()
s = w.get_file_as_string()
assert s == 'def foo():\n return 5\n'
def test_use_refs(self):
w = BasicPyFileWriter()
w.add_line('a = "A"')
ref = w.make_ref()
w.add_line('d = "D"')
ref.add_line('b = "B"').add_line('c = "C"')
s = w.get_file_as_string()
lns = s.split('\n')
assert 'a' in lns[0]
assert 'b' in lns[1]
assert 'c' in lns[2]
assert 'd' in lns[3]
class TestPyPackageWriter:
def test_includes_init(self):
with TemporaryDirectory() as d:
w = PyPackageWriter(pkg_name='foo', parent_dir=d)
assert '__init__' in w.modules
assert w.has_module_name('__init__')
assert w.pkg_path == os.path.join(d, 'foo')
w.write()
assert os.path.exists(d)
assert os.path.exists(os.path.join(d, 'foo'))
assert os.path.exists(os.path.join(d, 'foo', '__init__.py'))
| 31.673469
| 79
| 0.595361
| 214
| 1,552
| 4.098131
| 0.313084
| 0.063854
| 0.04561
| 0.078677
| 0.377423
| 0.36146
| 0.239453
| 0.139111
| 0.139111
| 0
| 0
| 0.005291
| 0.26933
| 1,552
| 48
| 80
| 32.333333
| 0.768078
| 0
| 0
| 0.153846
| 0
| 0
| 0.102448
| 0
| 0
| 0
| 0
| 0
| 0.307692
| 1
| 0.102564
| false
| 0
| 0.179487
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a00e9db3835c97e792fd5d157869c740791d2ab
| 1,060
|
py
|
Python
|
src/main/python/rds_log_cat/parser/mysql57.py
|
Scout24/rds-log-cat
|
00147dc2e3ec6fc894fccd5a9cbf7faa71cf7e78
|
[
"MIT"
] | 1
|
2019-11-07T10:44:28.000Z
|
2019-11-07T10:44:28.000Z
|
src/main/python/rds_log_cat/parser/mysql57.py
|
Scout24/rds-log-cat
|
00147dc2e3ec6fc894fccd5a9cbf7faa71cf7e78
|
[
"MIT"
] | 2
|
2017-04-25T13:36:44.000Z
|
2018-03-12T20:34:21.000Z
|
src/main/python/rds_log_cat/parser/mysql57.py
|
ImmobilienScout24/rds-log-cat
|
00147dc2e3ec6fc894fccd5a9cbf7faa71cf7e78
|
[
"MIT"
] | 1
|
2021-01-27T19:08:09.000Z
|
2021-01-27T19:08:09.000Z
|
from rds_log_cat.parser.parser import Parser, LineParserException
class Mysql57(Parser):
def __init__(self):
Parser.__init__(self)
def compose_timestamp(self, datetime, timezone):
if len(datetime) != 27:
raise LineParserException('wrong length of datetime - wrong date is: ' + datetime)
if not timezone == 'UTC':
raise LineParserException('Only able to parse times in UTC. You gave {}'.format(timezone))
return datetime
def parse(self, line):
"""
parses the fields in line to generate json structure
"""
expected_min_no_fields = 5
if len(line) < expected_min_no_fields:
raise LineParserException('line too short')
pid = line[1]
log_level = line[2].lstrip("[").rstrip("]")
timezone = 'UTC'
return {
'@timestamp': self.compose_timestamp(line[0], timezone),
'log_level': log_level,
'process_id': int(pid),
'message': ' '.join(map(str, line[3:]))
}
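# Illustrative example (hypothetical, already-split log fields):
#
#   line = ['2019-01-01T12:00:00.000000Z', '1234', '[Note]',
#           'Aborted', 'connection']
#   Mysql57().parse(line) == {
#       '@timestamp': '2019-01-01T12:00:00.000000Z',
#       'log_level': 'Note',
#       'process_id': 1234,
#       'message': 'Aborted connection',
#   }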
| 31.176471
| 102
| 0.592453
| 120
| 1,060
| 5.05
| 0.541667
| 0.118812
| 0.042904
| 0.062706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012016
| 0.293396
| 1,060
| 33
| 103
| 32.121212
| 0.797063
| 0.049057
| 0
| 0
| 0
| 0
| 0.147358
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.043478
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a01272b6dc30ae670eab0e73c74a21ff812e409
| 16,090
|
py
|
Python
|
corpustools/neighdens/neighborhood_density.py
|
PhonologicalCorpusTools/CorpusTools
|
ff5a7c06e2f7a478c5a239de7a78ef7eb5f4a45e
|
[
"BSD-3-Clause"
] | 97
|
2015-07-06T18:58:43.000Z
|
2022-03-10T23:00:07.000Z
|
corpustools/neighdens/neighborhood_density.py
|
PhonologicalCorpusTools/CorpusTools
|
ff5a7c06e2f7a478c5a239de7a78ef7eb5f4a45e
|
[
"BSD-3-Clause"
] | 443
|
2015-03-10T21:24:39.000Z
|
2022-03-22T22:20:13.000Z
|
corpustools/neighdens/neighborhood_density.py
|
PhonologicalCorpusTools/CorpusTools
|
ff5a7c06e2f7a478c5a239de7a78ef7eb5f4a45e
|
[
"BSD-3-Clause"
] | 22
|
2015-07-19T18:56:24.000Z
|
2020-09-17T17:58:12.000Z
|
from functools import partial
from corpustools.corpus.classes import Word
from corpustools.symbolsim.edit_distance import edit_distance
from corpustools.symbolsim.khorsi import khorsi
from corpustools.symbolsim.phono_edit_distance import phono_edit_distance
from corpustools.symbolsim.phono_align import Aligner
from corpustools.multiproc import filter_mp, score_mp
def _is_edit_distance_neighbor(w, query, sequence_type, max_distance):
w_len = len(getattr(w, sequence_type))
query_len = len(getattr(query, sequence_type))
if w_len > query_len+max_distance:
return False
if w_len < query_len-max_distance:
return False
return edit_distance(getattr(w, sequence_type), getattr(query, sequence_type),
sequence_type, max_distance) <= max_distance
def _is_phono_edit_distance_neighbor(w, query, sequence_type, specifier, max_distance):
return phono_edit_distance(getattr(w, sequence_type), getattr(query, sequence_type), sequence_type, specifier) <= max_distance
def _is_khorsi_neighbor(w, query, freq_base, sequence_type, max_distance):
return khorsi(getattr(w, sequence_type), getattr(query, sequence_type), freq_base, sequence_type, max_distance) >= max_distance
def neighborhood_density_all_words(corpus_context, tierdict, tier_type = None, sequence_type = None,
algorithm = 'edit_distance', max_distance = 1, output_format = 'spelling',
num_cores = -1, settable_attr = None, collapse_homophones = False,
stop_check = None, call_back = None):
"""Calculate the neighborhood density of all words in the corpus and
adds them as attributes of the words.
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
algorithm : str
The algorithm used to determine distance
max_distance : float, optional
Maximum edit distance from the queried word to consider a word a neighbor.
stop_check : callable, optional
Optional function to check whether to gracefully terminate early
call_back : callable, optional
Optional function to supply progress information during the function
settable_attr: string
Name of attribute that neighbourhood density results will be assigned to
"""
function = partial(neighborhood_density, corpus_context,
tierdict = tierdict,
tier_type = tier_type,
sequence_type = sequence_type,
algorithm = algorithm,
max_distance = max_distance,
collapse_homophones = collapse_homophones)
if call_back is not None:
call_back('Calculating neighborhood densities...')
call_back(0,len(corpus_context))
cur = 0
results = dict()
last_value_removed = None
last_key_removed = None
if num_cores == -1 or num_cores == 1:
for w in corpus_context:
if stop_check is not None and stop_check():
return
if last_value_removed:
tierdict[last_key_removed].append(last_value_removed)
w_sequence = getattr(w, corpus_context.sequence_type)
last_key_removed = str(w_sequence)
for i, item in enumerate(tierdict[last_key_removed]):
if str(item) == str(w):
last_value_removed = tierdict[last_key_removed].pop(i)
break
res = neighborhood_density(corpus_context, w, tierdict,
tier_type = tier_type,
sequence_type = sequence_type,
algorithm = algorithm,
max_distance = max_distance,
collapse_homophones = collapse_homophones)
results[str(w)] = [getattr(r, output_format) for r in res[1]]
setattr(w.original, settable_attr.name, res[0])
# for w in corpus_context:
# if stop_check is not None and stop_check():
# return
# cur += 1
# call_back(cur)
# res = function(w)
# results[str(w)] = [getattr(r, output_format) for r in res[1]]
# setattr(w.original, settable_attr.name, res[0]-1)
# #the -1 is to account for the fact that words are counted as their own neighbour, and this is incorrect
# #subtracting 1 here is easier than fixing the neighbourhood density algorithm
else:
iterable = ((w,) for w in corpus_context)
neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size = 1)
for n in neighbors:
#Have to look up the key, then look up the object due to how
#multiprocessing pickles objects
setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])),
#corpus_context.attribute.name, n[1][0])
settable_attr.name, n[1][0])
return results
def neighborhood_density(corpus_context, query, tierdict,
algorithm = 'edit_distance', max_distance = 1, collapse_homophones = False,
force_quadratic = False, file_type = None, tier_type=None, sequence_type = None,
stop_check = None, call_back = None):
"""Calculate the neighborhood density of a particular word in the corpus.
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
query : Word
The word whose neighborhood density to calculate.
algorithm : str
The algorithm used to determine distance
max_distance : float, optional
Maximum edit distance from the queried word to consider a word a neighbor
force_quadratic : bool
Force use of the less efficient quadratic algorithm even when finding
edit-distance-1 neighborhoods
stop_check : callable, optional
Optional function to check whether to gracefully terminate early
call_back : callable, optional
Optional function to supply progress information during the function
Returns
-------
tuple(int, set)
Tuple of the number of neighbors and the set of neighbor Words.
"""
matches = []
query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)
if call_back is not None:
call_back('Finding neighbors for {}...'.format(query))
call_back(0,len(corpus_context))
cur = 0
if algorithm == 'edit_distance' and max_distance == 1 and not force_quadratic:
return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type, tierdict,
file_type=file_type, collapse_homophones=collapse_homophones)
if algorithm == 'edit_distance':
is_neighbor = partial(_is_edit_distance_neighbor,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
elif algorithm == 'phono_edit_distance':
is_neighbor = partial(_is_phono_edit_distance_neighbor,
specifier = corpus_context.specifier,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
elif algorithm == 'khorsi':
freq_base = corpus_context.get_frequency_base()
is_neighbor = partial(_is_khorsi_neighbor,
freq_base = freq_base,
sequence_type = corpus_context.sequence_type,
max_distance = max_distance)
for w in corpus_context:
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 10 == 0:
call_back(cur)
if not is_neighbor(w, query):
continue
matches.append(w)
neighbors = set(matches)-set([query])
return (len(neighbors), neighbors)
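# Hedged usage sketch (argument values are illustrative; corpus_context,
# tierdict and tier_type are assumed to be set up as elsewhere in
# CorpusTools, not shown here):
#
#   count, neighbours = neighborhood_density(
#       corpus_context, 'cat', tierdict,
#       algorithm='edit_distance', max_distance=1, tier_type=tier_type)
#
# 'count' is the number of neighbours and 'neighbours' the set of Words.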
def fast_neighborhood_density(corpus_context, query, sequence_type, tier_type,
tierdict, file_type=None, trans_delimiter='.', collapse_homophones = False):
"""Generates all neighbors of edit distance <= 1 and searches
for them in corpus_context.
Will be faster than neighborhood_density when:
n > m * (1 + s), where
n: number of words in corpus
m: length of query
s: size of segment inventory
"""
neighbors = list()
query = ensure_query_is_word(query, corpus_context, sequence_type, tier_type, file_type=file_type)
for candidate in generate_neighbor_candidates(corpus_context, query, sequence_type):
if tier_type.att_type == 'tier':
cand_str = trans_delimiter.join(candidate)
else:
cand_str = ''.join(candidate)
if cand_str in tierdict:
for w in tierdict[cand_str]:
w_sequence = getattr(w, sequence_type)
if collapse_homophones and any(getattr(word, sequence_type) == w_sequence for word in neighbors):
continue
else:
neighbors.append(w)
return (len(neighbors), neighbors)
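# Worked example of the inequality in the docstring (illustrative numbers):
# with n = 20000 corpus words, a query of length m = 5 and an inventory of
# s = 30 segments, m * (1 + s) = 155 << 20000 = n, so generating the
# edit-distance-1 candidates and probing the tier dictionary is expected to
# beat scanning every word in the corpus.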
def generate_neighbor_candidates(corpus_context, query, sequence_type):
sequence = getattr(query, sequence_type)
yield [str(c) for c in sequence]
for i in range(len(sequence)):
yield [str(c) for c in sequence[:i]] + [str(c) for c in sequence[i+1:]] # deletion
for char in corpus_context.inventory:
if str(char) not in ['#', sequence[i]]:
yield [str(c) for c in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i:]] # insertion
yield [str(c) for c in sequence[:i]] + [str(char)] + [str(c) for c in sequence[i+1:]] # substitution
for char in corpus_context.inventory: # final pass to get insertion at len+1
if str(char) not in ['#', sequence[i]]:
yield [str(c) for c in sequence[:]] + [str(char)] # insertion
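# Illustrative candidate set (hypothetical query "at", inventory {a, t, b}):
# besides "at" itself, the generator yields deletions "t" and "a",
# insertions such as "bat", "abt" and "atb", and substitutions such as
# "bt" and "ab". Only these strings are then looked up in the tier
# dictionary instead of scanning the whole corpus.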
def find_mutation_minpairs_all_words(corpus_context, tierdict, tier_type = None, num_cores = -1, collapse_homophones = False,
stop_check = None, call_back = None):
function = partial(find_mutation_minpairs, corpus_context, tier_type=tier_type, collapse_homophones = collapse_homophones)
if call_back is not None:
call_back('Calculating neighborhood densities...')
call_back(0,len(corpus_context))
cur = 0
results = dict()
last_value_removed = None
last_key_removed = None
if num_cores == -1 or num_cores == 1:
for w in corpus_context:
if stop_check is not None and stop_check():
return
if last_value_removed:
tierdict[last_key_removed].append(last_value_removed)
w_sequence = getattr(w, corpus_context.sequence_type)
last_key_removed = str(w_sequence)
for i, item in enumerate(tierdict[last_key_removed]):
if str(item) == str(w):
last_value_removed = tierdict[last_key_removed].pop(i)
break
res = find_mutation_minpairs(corpus_context, w,
tier_type=tier_type, collapse_homophones = collapse_homophones)
results[str(w)] = res[1]
setattr(w.original, corpus_context.attribute.name, res[0])
# for w in corpus_context:
# if stop_check is not None and stop_check():
# return
# cur += 1
# call_back(cur)
# res = function(w)
# results[str(w)] = res[1]#[str(r) for r in res[1]]
# setattr(w.original, corpus_context.attribute.name, res[0])
else:
iterable = ((w,) for w in corpus_context)
neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size= 1)
for n in neighbors:
#Have to look up the key, then look up the object due to how
#multiprocessing pickles objects
setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), corpus_context.attribute.name, n[1][0])
return results
def find_mutation_minpairs(corpus_context, query, tier_type = None, collapse_homophones = False,
stop_check = None, call_back = None):
"""Find all minimal pairs of the query word based only on segment
mutations (not deletions/insertions)
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
query : Word
The word whose minimal pairs to find
stop_check : callable or None
Optional function to check whether to gracefully terminate early
call_back : callable or None
Optional function to supply progress information during the function
Returns
-------
list
The found minimal pairs for the queried word
"""
matches = []
sequence_type = corpus_context.sequence_type
query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type)
if call_back is not None:
call_back('Finding neighbors...')
call_back(0,len(corpus_context))
cur = 0
al = Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1)
for w in corpus_context:
w_sequence = getattr(w, sequence_type)
query_sequence = getattr(query, sequence_type)
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
if cur % 10 == 0:
call_back(cur)
if (len(w_sequence) > len(query_sequence)+1 or
len(w_sequence) < len(query_sequence)-1):
continue
m = al.make_similarity_matrix(query_sequence, w_sequence)
if m[-1][-1]['f'] != 1:
continue
w_sequence = getattr(w, sequence_type)
if collapse_homophones and any(getattr(m, sequence_type) == w_sequence for m in matches):
continue
else:
#matches.append(str(w_sequence))
matches.append(w)
matches = [m.spelling for m in matches]
neighbors = list(set(matches)-set([str(query_sequence)]))
return (len(neighbors), neighbors)
def ensure_query_is_word(query, corpus, sequence_type, tier_type, trans_delimiter='.', file_type=None):
if isinstance(query, Word):
query_word = query
else:
if tier_type.att_type == 'spelling':
if file_type == sequence_type:
query_word = Word(**{sequence_type: list(query)})
else:
query_word = query.replace(trans_delimiter, '')
query_word = Word(**{sequence_type: list(query_word)})
elif tier_type.att_type == 'tier':
if file_type == sequence_type:
query_with_td = '.'.join(query) if '.' not in query else query
for entry in corpus:
corpus_word_with_td = str(getattr(entry, sequence_type))
if query_with_td == corpus_word_with_td:  # a corpus word has the same transcription,
return entry  # so refer to that word in the corpus.
# the following runs only if no word with this transcription was found in the corpus
new_query = parse(query, trans_delimiter)
query_word = Word(**{sequence_type: new_query})
else: # if file contains spelling
try:
query_word = corpus.corpus.find(query)
except KeyError:
# if the word in the file can't be found in the corpus
new_query = parse(query, trans_delimiter)
query_word = Word(**{sequence_type: list(new_query)})
return query_word
def parse(word, delimiter):
return word.split(delimiter) if delimiter in word else list(word)
| 44.203297
| 131
| 0.627968
| 1,992
| 16,090
| 4.853916
| 0.127008
| 0.064536
| 0.01117
| 0.025856
| 0.691799
| 0.630055
| 0.570586
| 0.527355
| 0.478229
| 0.456821
| 0
| 0.005364
| 0.293226
| 16,090
| 364
| 132
| 44.203297
| 0.844706
| 0.219888
| 0
| 0.53304
| 0
| 0
| 0.019187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048458
| false
| 0
| 0.030837
| 0.013216
| 0.15859
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a039f10e8309cc703a9629baacf52288c510305
| 5,046
|
py
|
Python
|
ex05-td/ex05-td.py
|
vijaykumarprabhu/rl-course
|
cc9db0236bd1908e0fa54eae1b2fcfd609ec0ae4
|
[
"MIT"
] | null | null | null |
ex05-td/ex05-td.py
|
vijaykumarprabhu/rl-course
|
cc9db0236bd1908e0fa54eae1b2fcfd609ec0ae4
|
[
"MIT"
] | null | null | null |
ex05-td/ex05-td.py
|
vijaykumarprabhu/rl-course
|
cc9db0236bd1908e0fa54eae1b2fcfd609ec0ae4
|
[
"MIT"
] | 1
|
2020-05-26T20:11:21.000Z
|
2020-05-26T20:11:21.000Z
|
import gym
import numpy as np
from itertools import product
import matplotlib.pyplot as plt
def print_policy(Q, env):
""" This is a helper function to print a nice policy from the Q function"""
moves = [u'←', u'↓',u'→', u'↑']
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
policy = np.chararray(dims, unicode=True)
policy[:] = ' '
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
policy[idx] = moves[np.argmax(Q[s])]
if env.desc[idx] in ['H', 'G']:
policy[idx] = u'·'
print('\n'.join([''.join([u'{:2}'.format(item) for item in row])
for row in policy]))
def plot_V(Q, env):
""" This is a helper function to plot the state values from the Q function"""
fig = plt.figure()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
V = np.zeros(dims)
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
V[idx] = np.max(Q[s])
if env.desc[idx] in ['H', 'G']:
V[idx] = 0.
plt.imshow(V, origin='upper',
extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6,
cmap=plt.cm.RdYlGn, interpolation='none')
for x, y in product(range(dims[0]), range(dims[1])):
plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]),
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
def plot_Q(Q, env):
""" This is a helper function to plot the Q function """
from matplotlib import colors, patches
fig = plt.figure()
ax = fig.gca()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
up = np.array([[0, 1], [0.5, 0.5], [1,1]])
down = np.array([[0, 0], [0.5, 0.5], [1,0]])
left = np.array([[0, 0], [0.5, 0.5], [0,1]])
right = np.array([[1, 0], [0.5, 0.5], [1,1]])
tri = [left, down, right, up]
pos = [[0.2, 0.5], [0.5, 0.2], [0.8, 0.5], [0.5, 0.8]]
cmap = plt.cm.RdYlGn
norm = colors.Normalize(vmin=.0,vmax=.6)
ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=cmap)
ax.grid(which='major', color='black', linestyle='-', linewidth=2)
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
x, y = idx
if env.desc[idx] in ['H', 'G']:
ax.add_patch(patches.Rectangle((y, 3-x), 1, 1, color=cmap(.0)))
plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0),
horizontalalignment='center',
verticalalignment='center')
continue
for a in range(len(tri)):
ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3-x]), color=cmap(Q[s][a])))
plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]),
horizontalalignment='center', verticalalignment='center',
fontsize=9, fontweight=('bold' if Q[s][a] == np.max(Q[s]) else 'normal'))
plt.xticks([])
plt.yticks([])
def choose_abs_greedy_action(state, Q, epsilon):
# epsilon-greedy action selection; note that this relies on the module-level `env` for the size of the action space
action = None
if np.random.uniform(0, 1) < epsilon:
action = np.random.randint(env.action_space.n)
else:
action = np.argmax(Q[state,:])
return action
def max_action_state(state, Q):
# value of the greedy action in `state`, i.e. the maximum of Q[state, a] over actions a
action = np.argmax(Q[state,:])
return Q[state, action]
def sarsa(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):
#Q = np.zeros((env.observation_space.n, env.action_space.n))
Q = np.random.rand(env.observation_space.n, env.action_space.n)
# SARSA: on-policy TD control; both the current and the next action are chosen by the epsilon-greedy policy
for i in range(num_ep):
s = env.reset()
done = False
a = choose_abs_greedy_action(s, Q, epsilon)
while not done:
s_, r, done, _ = env.step(a)
a_ = choose_abs_greedy_action(s_, Q, epsilon)
#update Q using sarsa
Q[s, a] = Q[s, a] + alpha * (r + gamma * Q[s_, a_] - Q[s, a])
s = s_
a = a_
return Q
def qlearning(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):
#Q = np.zeros((env.observation_space.n, env.action_space.n))
Q = np.random.rand(env.observation_space.n, env.action_space.n)
# Q-learning: off-policy TD control; the update target uses the greedy value of the next state
for i in range(num_ep):
s = env.reset()
done = False
while not done:
a = choose_abs_greedy_action(s, Q, epsilon)
s_, r, done, _ = env.step(a)
#update Q using Q learning
Q[s, a] = Q[s, a] + alpha * (r + gamma * max_action_state(s_, Q) - Q[s, a])
s = s_
return Q
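# A small evaluation sketch (not part of the original exercise): roll out the greedy
# policy from a learned Q table to estimate the success rate. The helper name, episode
# count and step cap are illustrative choices.
def evaluate_greedy(env, Q, num_ep=100, max_steps=200):
    successes = 0
    for _ in range(num_ep):
        s = env.reset()
        done = False
        r = 0
        steps = 0
        while not done and steps < max_steps:
            s, r, done, _ = env.step(np.argmax(Q[s, :]))
            steps += 1
        successes += r  # FrozenLake gives reward 1 only when the goal is reached
    return successes / num_ep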
env=gym.make('FrozenLake-v0')
#env=gym.make('FrozenLake-v0', is_slippery=False)
#env=gym.make('FrozenLake-v0', map_name="8x8")
print("Running sarsa...")
Q = sarsa(env)
plot_V(Q, env)
plot_Q(Q, env)
print_policy(Q, env)
plt.show()
print("Running qlearning")
Q = qlearning(env)
plot_V(Q, env)
plot_Q(Q, env)
print_policy(Q, env)
plt.show()
| 32.346154
| 104
| 0.561038
| 813
| 5,046
| 3.418204
| 0.210332
| 0.011515
| 0.010795
| 0.008636
| 0.479669
| 0.434329
| 0.398345
| 0.392587
| 0.34005
| 0.314861
| 0
| 0.030384
| 0.256441
| 5,046
| 155
| 105
| 32.554839
| 0.708955
| 0.118113
| 0
| 0.474576
| 0
| 0
| 0.037305
| 0
| 0
| 0
| 0
| 0.006452
| 0
| 1
| 0.059322
| false
| 0
| 0.042373
| 0
| 0.135593
| 0.050847
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a04207ceaf45ab945588b6a283b882bf8a8d0e4
| 1,116
|
py
|
Python
|
frappe/website/doctype/website_route_meta/test_website_route_meta.py
|
oryxsolutions/frappe
|
d193ea22d17ca40d57432040a8afad72287d9e23
|
[
"MIT"
] | null | null | null |
frappe/website/doctype/website_route_meta/test_website_route_meta.py
|
oryxsolutions/frappe
|
d193ea22d17ca40d57432040a8afad72287d9e23
|
[
"MIT"
] | null | null | null |
frappe/website/doctype/website_route_meta/test_website_route_meta.py
|
oryxsolutions/frappe
|
d193ea22d17ca40d57432040a8afad72287d9e23
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# License: MIT. See LICENSE
import unittest
import frappe
from frappe.utils import set_request
from frappe.website.serve import get_response
test_dependencies = ["Blog Post"]
class TestWebsiteRouteMeta(unittest.TestCase):
def test_meta_tag_generation(self):
blogs = frappe.get_all(
"Blog Post", fields=["name", "route"], filters={"published": 1, "route": ("!=", "")}, limit=1
)
blog = blogs[0]
# create meta tags for this route
doc = frappe.new_doc("Website Route Meta")
doc.append("meta_tags", {"key": "type", "value": "blog_post"})
doc.append("meta_tags", {"key": "og:title", "value": "My Blog"})
doc.name = blog.route
doc.insert()
# set request on this route
set_request(path=blog.route)
response = get_response()
self.assertEqual(response.status_code, 200)
html = response.get_data().decode()
self.assertTrue("""<meta name="type" content="blog_post">""" in html)
self.assertTrue("""<meta property="og:title" content="My Blog">""" in html)
def tearDown(self):
frappe.db.rollback()
| 27.219512
| 96
| 0.689964
| 152
| 1,116
| 4.960526
| 0.486842
| 0.04244
| 0.034483
| 0.045093
| 0.05305
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011518
| 0.144265
| 1,116
| 40
| 97
| 27.9
| 0.77801
| 0.145161
| 0
| 0
| 0
| 0
| 0.216245
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a061597dffbdc657df9899df8da9b8cc5a53c7e
| 644
|
py
|
Python
|
test/unittests/test_AgRunoff.py
|
rajadain/gwlf-e
|
ba2fb9dbc08a3d7a4ced4b83b6f0f1307814e2a3
|
[
"Apache-2.0"
] | null | null | null |
test/unittests/test_AgRunoff.py
|
rajadain/gwlf-e
|
ba2fb9dbc08a3d7a4ced4b83b6f0f1307814e2a3
|
[
"Apache-2.0"
] | null | null | null |
test/unittests/test_AgRunoff.py
|
rajadain/gwlf-e
|
ba2fb9dbc08a3d7a4ced4b83b6f0f1307814e2a3
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Runoff import AgRunoff
class TestAgRunoff(VariableUnitTest):
# @skip("not ready")
def test_AgRunoff(self):
z = self.z
np.testing.assert_array_almost_equal(
AgRunoff.AgRunoff_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.CN, z.AntMoist_0, z.NUrb,
z.Grow_0, z.Landuse, z.Area),
AgRunoff.AgRunoff(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.CN, z.AntMoist_0, z.NUrb,
z.Grow_0, z.Landuse, z.Area), decimal=7)
| 37.882353
| 118
| 0.630435
| 95
| 644
| 4.147368
| 0.431579
| 0.030457
| 0.030457
| 0.076142
| 0.390863
| 0.390863
| 0.390863
| 0.390863
| 0.390863
| 0.390863
| 0
| 0.014523
| 0.251553
| 644
| 16
| 119
| 40.25
| 0.802905
| 0.02795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a0650316e52ee5d4a9ff4c95b3303130df01427
| 3,397
|
py
|
Python
|
lingvo/tasks/car/car_layers_test.py
|
Harshs27/lingvo
|
bd396e651488b2e2c4a7416be077b4a0226c87c8
|
[
"Apache-2.0"
] | 2,611
|
2018-10-16T20:14:10.000Z
|
2022-03-31T14:48:41.000Z
|
lingvo/tasks/car/car_layers_test.py
|
Harshs27/lingvo
|
bd396e651488b2e2c4a7416be077b4a0226c87c8
|
[
"Apache-2.0"
] | 249
|
2018-10-27T06:02:29.000Z
|
2022-03-30T18:00:39.000Z
|
lingvo/tasks/car/car_layers_test.py
|
Harshs27/lingvo
|
bd396e651488b2e2c4a7416be077b4a0226c87c8
|
[
"Apache-2.0"
] | 436
|
2018-10-25T05:31:45.000Z
|
2022-03-31T07:26:03.000Z
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for car_layers."""
from lingvo import compat as tf
from lingvo.core import py_utils
from lingvo.core import test_utils
from lingvo.tasks.car import car_layers
class CarLayersTest(test_utils.TestCase):
def _testNestedOutShape(self, p, input_shape, expected_shape):
batch_size, num_points, _ = input_shape
g = tf.Graph()
with g.as_default():
net = p.Instantiate()
input_data = py_utils.NestedMap(
points=tf.random.uniform(input_shape[:-1] + (3,)),
features=tf.random.uniform(input_shape),
padding=tf.zeros((batch_size, num_points), dtype=tf.float32),
label=tf.random.uniform((batch_size,),
minval=0,
maxval=16,
dtype=tf.int32))
result = net.FPropDefaultTheta(input_data)
with self.session(graph=g):
self.evaluate(tf.global_variables_initializer())
np_result = self.evaluate(result)
grouped_points_result = np_result.grouped_points
self.assertEqual(grouped_points_result.features.shape,
expected_shape.grouped_points.features)
self.assertEqual(grouped_points_result.points.shape,
expected_shape.grouped_points.points)
self.assertEqual(grouped_points_result.padding.shape,
expected_shape.grouped_points.padding)
query_points_result = np_result.query_points
self.assertEqual(query_points_result.points.shape,
expected_shape.query_points.points)
self.assertEqual(query_points_result.padding.shape,
expected_shape.query_points.padding)
def testSamplingAndGrouping(self):
for num_points in [1024, 256]:
for input_dims in [3, 6, 9]:
for group_size in [32, 64]:
p = car_layers.SamplingAndGroupingLayer.Params().Set(
name='SampleGroupTest',
num_samples=256,
ball_radius=0.2,
group_size=group_size,
sample_neighbors_uniformly=True)
grouped_points_shape = py_utils.NestedMap(
features=(8, 256, group_size, input_dims),
points=(8, 256, group_size, 3),
padding=(8, 256, group_size))
query_points_shape = py_utils.NestedMap(
points=(8, 256, 3), padding=(8, 256))
expected_shape = py_utils.NestedMap({
'grouped_points': grouped_points_shape,
'query_points': query_points_shape
})
self._testNestedOutShape(p, (8, num_points, input_dims),
expected_shape)
if __name__ == '__main__':
tf.test.main()
| 40.927711
| 80
| 0.637916
| 405
| 3,397
| 5.123457
| 0.37284
| 0.068916
| 0.052048
| 0.040482
| 0.226988
| 0.128193
| 0
| 0
| 0
| 0
| 0
| 0.023622
| 0.252281
| 3,397
| 82
| 81
| 41.426829
| 0.793307
| 0.206064
| 0
| 0
| 0
| 0
| 0.018304
| 0
| 0
| 0
| 0
| 0
| 0.086207
| 1
| 0.034483
| false
| 0
| 0.068966
| 0
| 0.12069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a066d9e3ce3fc69b55dd82dd4922f5e05e9b7a2
| 2,167
|
py
|
Python
|
take_snapshot.py
|
ITCave/sniff-for-changes-in-directory
|
59a06c1ca85033273845e8266038bfeacfc9f64d
|
[
"MIT"
] | null | null | null |
take_snapshot.py
|
ITCave/sniff-for-changes-in-directory
|
59a06c1ca85033273845e8266038bfeacfc9f64d
|
[
"MIT"
] | null | null | null |
take_snapshot.py
|
ITCave/sniff-for-changes-in-directory
|
59a06c1ca85033273845e8266038bfeacfc9f64d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Filename : take_snapshot.py
# @Date : 2019-07-15-13-44
# @Project: ITC-sniff-for-changes-in-directory
# @Author: Piotr Wołoszyn
# @Website: http://itcave.eu
# @Email: [email protected]
# @License: MIT
# @Copyright (C) 2019 ITGO Piotr Wołoszyn
# Generic imports
import os
import pickle
import re
import argparse
from datetime import datetime
def clear_path_string(s):
"""
Simple function that removes chars that are not allowed in file names
:param s: path_string
:return: cleaned_path_string
"""
return (re.sub('[^a-zA-Z]+', '#', s)).lower()
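# A small usage sketch with an illustrative (invented) path:
def _clear_path_string_example():
    # runs of non-letters collapse to a single '#' and the result is lower-cased
    assert clear_path_string('/home/user/My Docs') == '#home#user#my#docs'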
def sniff(sniff_path):
"""
Walks the path and stores information about directory content
:param sniff_path: relative or absolute path
:return: void
"""
sniff_path = str(sniff_path).lower()
# Variable in which information will be stored
dir_store = {}
# Recursive loop that walks through all of the subdirectories
for subdir, dirs, files in os.walk(sniff_path):
if subdir not in dir_store:
dir_store[subdir] = {}
dir_store[subdir]['subdirs'] = dirs
dir_store[subdir]['files'] = files
dir_store[subdir]['file_details'] = {}
for file in files:
f_path = os.path.join(subdir, file)
# The information that will be stored for each file - in this case the last modification date
# Important: os.path.getmtime works across platforms
modified_date = os.path.getmtime(f_path)
dir_store[subdir]['file_details'][file] = (modified_date,)
# Name of a file in which data will be stored
dump_name = clear_path_string(sniff_path) + '_' + datetime.now().strftime('%Y%m%d%H%M%S')
# Save pickled data
with open(dump_name + '.pkl', 'wb') as output:
pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL)
print("Directory Snapshot taken:", dump_name)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Directory Sniffer')
parser.add_argument('path', help='Path to the directory that you want to take a snapshot of')
args = parser.parse_args()
sniff(args.path)
| 28.513158
| 113
| 0.662206
| 299
| 2,167
| 4.655518
| 0.488294
| 0.045977
| 0.050287
| 0.025862
| 0.03592
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010101
| 0.22335
| 2,167
| 75
| 114
| 28.893333
| 0.816993
| 0.37748
| 0
| 0
| 0
| 0
| 0.137529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.172414
| 0
| 0.275862
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a0800535a188f21223ec11106f263b7159026d7
| 7,221
|
py
|
Python
|
nuitka/nodes/GlobalsLocalsNodes.py
|
juanfra684/Nuitka
|
0e276895fadabefb598232f2ccf8cc7736c9a85b
|
[
"Apache-2.0"
] | 1
|
2020-04-13T18:56:02.000Z
|
2020-04-13T18:56:02.000Z
|
nuitka/nodes/GlobalsLocalsNodes.py
|
juanfra684/Nuitka
|
0e276895fadabefb598232f2ccf8cc7736c9a85b
|
[
"Apache-2.0"
] | 1
|
2020-07-11T17:53:56.000Z
|
2020-07-11T17:53:56.000Z
|
nuitka/nodes/GlobalsLocalsNodes.py
|
juanfra684/Nuitka
|
0e276895fadabefb598232f2ccf8cc7736c9a85b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Globals/locals/single arg dir nodes
These nodes give access to variables and are highly problematic: code that uses
them may change or access anything about those variables, so once we no longer
know where their values go, nothing can be trusted anymore.
The "dir()" call without arguments is reformulated to locals or globals calls.
"""
from .ConstantRefNodes import makeConstantRefNode
from .DictionaryNodes import ExpressionKeyValuePair, ExpressionMakeDict
from .ExpressionBases import ExpressionBase, ExpressionBuiltinSingleArgBase
from .VariableRefNodes import ExpressionTempVariableRef, ExpressionVariableRef
class ExpressionBuiltinGlobals(ExpressionBase):
kind = "EXPRESSION_BUILTIN_GLOBALS"
def __init__(self, source_ref):
ExpressionBase.__init__(self, source_ref=source_ref)
def finalize(self):
del self.parent
def computeExpressionRaw(self, trace_collection):
return self, None, None
def mayHaveSideEffects(self):
return False
def mayRaiseException(self, exception_type):
return False
class ExpressionBuiltinLocalsBase(ExpressionBase):
# Base classes can be abstract, pylint: disable=abstract-method
__slots__ = ("variable_traces", "locals_scope")
def __init__(self, locals_scope, source_ref):
ExpressionBase.__init__(self, source_ref=source_ref)
self.variable_traces = None
self.locals_scope = locals_scope
def finalize(self):
del self.locals_scope
del self.variable_traces
def mayHaveSideEffects(self):
return False
def mayRaiseException(self, exception_type):
return False
def getVariableTraces(self):
return self.variable_traces
class ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase):
kind = "EXPRESSION_BUILTIN_LOCALS_UPDATED"
def __init__(self, locals_scope, source_ref):
ExpressionBuiltinLocalsBase.__init__(
self, locals_scope=locals_scope, source_ref=source_ref
)
assert locals_scope is not None
def getLocalsScope(self):
return self.locals_scope
def computeExpressionRaw(self, trace_collection):
# Just inform the collection that all escaped.
self.variable_traces = trace_collection.onLocalsUsage(
self.getParentVariableProvider()
)
trace_collection.onLocalsDictEscaped(self.locals_scope)
return self, None, None
class ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase):
kind = "EXPRESSION_BUILTIN_LOCALS_REF"
def __init__(self, locals_scope, source_ref):
ExpressionBuiltinLocalsBase.__init__(
self, locals_scope=locals_scope, source_ref=source_ref
)
def getLocalsScope(self):
return self.locals_scope
def computeExpressionRaw(self, trace_collection):
if self.locals_scope.isMarkedForPropagation():
result = ExpressionMakeDict(
pairs=(
ExpressionKeyValuePair(
key=makeConstantRefNode(
constant=variable_name, source_ref=self.source_ref
),
value=ExpressionTempVariableRef(
variable=variable, source_ref=self.source_ref
),
source_ref=self.source_ref,
)
for variable_name, variable in self.locals_scope.getPropagationVariables().items()
),
source_ref=self.source_ref,
)
new_result = result.computeExpressionRaw(trace_collection)
assert new_result[0] is result
self.finalize()
return result, "new_expression", "Propagated locals dictionary reference."
# Just inform the collection that all escaped unless it is abortative.
if not self.getParent().isStatementReturn():
trace_collection.onLocalsUsage(self.getParentVariableProvider())
return self, None, None
class ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase):
kind = "EXPRESSION_BUILTIN_LOCALS_COPY"
def computeExpressionRaw(self, trace_collection):
# Just inform the collection that all escaped.
self.variable_traces = trace_collection.onLocalsUsage(
self.getParentVariableProvider()
)
for variable, variable_trace in self.variable_traces:
if (
not variable_trace.mustHaveValue()
and not variable_trace.mustNotHaveValue()
):
return self, None, None
# Other locals elsewhere.
if variable_trace.getNameUsageCount() > 1:
return self, None, None
pairs = []
for variable, variable_trace in self.variable_traces:
if variable_trace.mustHaveValue():
pairs.append(
ExpressionKeyValuePair(
key=makeConstantRefNode(
constant=variable.getName(),
user_provided=True,
source_ref=self.source_ref,
),
value=ExpressionVariableRef(
variable=variable, source_ref=self.source_ref
),
source_ref=self.source_ref,
)
)
# Locals is sorted of course.
def _sorted(pairs):
names = self.getParentVariableProvider().getLocalVariableNames()
return sorted(
pairs,
key=lambda pair: names.index(pair.getKey().getCompileTimeConstant()),
)
result = ExpressionMakeDict(pairs=_sorted(pairs), source_ref=self.source_ref)
return result, "new_expression", "Statically predicted locals dictionary."
class ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase):
kind = "EXPRESSION_BUILTIN_DIR1"
def computeExpression(self, trace_collection):
# TODO: Quite some cases should be possible to predict and this
# should be using a slot, with "__dir__" being overloaded or not.
# Any code could be run, note that.
trace_collection.onControlFlowEscape(self)
# Any exception may be raised.
trace_collection.onExceptionRaiseExit(BaseException)
return self, None, None
| 34.222749
| 102
| 0.655034
| 710
| 7,221
| 6.471831
| 0.330986
| 0.054842
| 0.039173
| 0.033079
| 0.387813
| 0.270076
| 0.257454
| 0.244614
| 0.244614
| 0.203264
| 0
| 0.002313
| 0.281678
| 7,221
| 210
| 103
| 34.385714
| 0.883555
| 0.212851
| 0
| 0.442623
| 0
| 0
| 0.048496
| 0.024956
| 0
| 0
| 0
| 0.004762
| 0.016393
| 1
| 0.155738
| false
| 0
| 0.032787
| 0.065574
| 0.418033
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a096c14b2ddf561ce6b9429ac126077a454bd8e
| 6,298
|
py
|
Python
|
tests/chainerx_tests/unit_tests/test_scalar.py
|
yuhonghong66/chainer
|
15d475f54fc39587abd7264808c5e4b33782df9e
|
[
"MIT"
] | 1
|
2019-02-12T23:10:16.000Z
|
2019-02-12T23:10:16.000Z
|
tests/chainerx_tests/unit_tests/test_scalar.py
|
nolfwin/chainer
|
8d776fcc1e848cb9d3800a6aab356eb91ae9d088
|
[
"MIT"
] | 2
|
2019-05-14T15:45:01.000Z
|
2019-05-15T07:12:49.000Z
|
tests/chainerx_tests/unit_tests/test_scalar.py
|
nolfwin/chainer
|
8d776fcc1e848cb9d3800a6aab356eb91ae9d088
|
[
"MIT"
] | 1
|
2018-05-28T22:43:34.000Z
|
2018-05-28T22:43:34.000Z
|
import math
import pytest
import chainerx
def _check_cast_scalar_equals_data(scalar, data):
assert bool(scalar) == bool(data)
assert int(scalar) == int(data)
assert float(scalar) == float(data)
all_scalar_values = [
-2, 1, -1.5, 2.3, True, False, float('inf'), float('nan')]
@pytest.mark.parametrize('value,dtype', [
(0, chainerx.int64),
(-1, chainerx.int64),
(0x7fffffffffffffff, chainerx.int64),
(-0x8000000000000000, chainerx.int64),
(0.0, chainerx.float64),
(float('inf'), chainerx.float64),
(float('nan'), chainerx.float64),
(True, chainerx.bool_),
(False, chainerx.bool_),
])
def test_init_without_dtype(value, dtype):
scalar = chainerx.Scalar(value)
assert scalar.dtype == dtype
if math.isnan(value):
assert math.isnan(scalar.tolist())
else:
assert scalar.tolist() == value
assert isinstance(scalar.tolist(), type(value))
@pytest.mark.parametrize('value,cast_dtype,expected_value', [
(0, chainerx.bool_, False),
(0, chainerx.int8, 0),
(0, chainerx.int16, 0),
(0, chainerx.int32, 0),
(0, chainerx.int64, 0),
(0, chainerx.uint8, 0),
(0, chainerx.float32, 0.0),
(0, chainerx.float64, 0.0),
(0.0, chainerx.bool_, False),
(0.0, chainerx.int8, 0),
(0.0, chainerx.int16, 0),
(0.0, chainerx.int32, 0),
(0.0, chainerx.int64, 0),
(0.0, chainerx.uint8, 0),
(0.0, chainerx.float32, 0.0),
(0.0, chainerx.float64, 0.0),
(1, chainerx.bool_, True),
(1, chainerx.int8, 1),
(1, chainerx.int16, 1),
(1, chainerx.int32, 1),
(1, chainerx.int64, 1),
(1, chainerx.uint8, 1),
(1, chainerx.float32, 1.0),
(1, chainerx.float64, 1.0),
(1.0, chainerx.bool_, True),
(1.0, chainerx.int8, 1),
(1.0, chainerx.int16, 1),
(1.0, chainerx.int32, 1),
(1.0, chainerx.int64, 1),
(1.0, chainerx.uint8, 1),
(1.0, chainerx.float32, 1.0),
(1.0, chainerx.float64, 1.0),
(-1, chainerx.bool_, True),
(-1, chainerx.int8, -1),
(-1, chainerx.int16, -1),
(-1, chainerx.int32, -1),
(-1, chainerx.int64, -1),
(-1, chainerx.uint8, 0xff),
(-1, chainerx.float32, -1.0),
(-1, chainerx.float64, -1.0),
(0x100, chainerx.bool_, True),
(0x100, chainerx.int8, 0),
(0x100, chainerx.int16, 0x100),
(0x100, chainerx.int32, 0x100),
(0x100, chainerx.int64, 0x100),
(0x100, chainerx.uint8, 0),
(0x10000, chainerx.bool_, True),
(0x10000, chainerx.int8, 0),
(0x10000, chainerx.int16, 0),
(0x10000, chainerx.int32, 0x10000),
(0x10000, chainerx.int64, 0x10000),
(0x10000, chainerx.uint8, 0),
(0x100000000, chainerx.bool_, True),
(0x100000000, chainerx.int8, 0),
(0x100000000, chainerx.int16, 0),
(0x100000000, chainerx.int32, 0),
(0x100000000, chainerx.int64, 0x100000000),
(0x100000000, chainerx.uint8, 0),
(0x7fffffffffffffff, chainerx.bool_, True),
(0x7fffffffffffffff, chainerx.int8, -1),
(0x7fffffffffffffff, chainerx.int16, -1),
(0x7fffffffffffffff, chainerx.int32, -1),
(0x7fffffffffffffff, chainerx.int64, 0x7fffffffffffffff),
(0x7fffffffffffffff, chainerx.uint8, 255),
])
def test_init_casted(value, cast_dtype, expected_value):
scalar = chainerx.Scalar(value, cast_dtype)
assert scalar.dtype == cast_dtype
if math.isnan(expected_value):
assert math.isnan(scalar.tolist())
else:
assert scalar.tolist() == expected_value
assert isinstance(scalar.tolist(), type(expected_value))
@pytest.mark.parametrize(
'value',
[0, 0.0, 1, 1.0, -1, 0x100, 0x10000, 0x100000000, 0x7fffffffffffffff])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_init_with_dtype(value, dtype_spec):
expected_dtype = chainerx.dtype(dtype_spec)
scalar = chainerx.Scalar(value, dtype_spec)
assert scalar.dtype == expected_dtype
assert scalar == chainerx.Scalar(value, expected_dtype)
@pytest.mark.parametrize('value1,value2', [
# TODO(niboshi): Support commented-out cases
(0, 0),
(1, 1),
# (1, 1.0),
(1.5, 1.5),
(-1.5, -1.5),
(True, True),
(False, False),
# (True, 1),
# (True, 1.0),
# (False, 0),
# (False, 0.0),
# (float('inf'), float('inf')),
])
def test_equality(value1, value2):
scalar1 = chainerx.Scalar(value1)
scalar2 = chainerx.Scalar(value2)
assert scalar1 == scalar2
assert scalar2 == scalar1
assert scalar1 == value1
assert value1 == scalar1
assert scalar2 == value2
assert value2 == scalar2
assert scalar2 == value1
assert value1 == scalar2
assert scalar1 == value2
assert value2 == scalar1
@pytest.mark.parametrize('value1,value2', [
(0, 1),
(-1, 1),
(-1.0001, -1.0),
(-1.0001, -1),
(True, False),
(True, 1.1),
(1.0001, 1.0002),
(float('nan'), float('nan')),
])
def test_inequality(value1, value2):
scalar1 = chainerx.Scalar(value1)
scalar2 = chainerx.Scalar(value2)
assert scalar1 != scalar2
assert scalar2 != scalar1
assert scalar2 != value1
assert value1 != scalar2
assert scalar1 != value2
assert value2 != scalar1
@pytest.mark.parametrize('value', [
-2, 1, -1.5, 2.3, True, False
])
def test_cast(value):
scalar = chainerx.Scalar(value)
_check_cast_scalar_equals_data(scalar, value)
_check_cast_scalar_equals_data(+scalar, +value)
if isinstance(value, bool):
with pytest.raises(chainerx.DtypeError):
-scalar # should not be able to negate bool
else:
_check_cast_scalar_equals_data(-scalar, -value)
@pytest.mark.parametrize('value', all_scalar_values)
def test_dtype(value):
scalar = chainerx.Scalar(value)
if isinstance(value, bool):
assert scalar.dtype == chainerx.bool_
elif isinstance(value, int):
assert scalar.dtype == chainerx.int64
elif isinstance(value, float):
assert scalar.dtype == chainerx.float64
else:
assert False
@pytest.mark.parametrize('value', all_scalar_values)
def test_repr(value):
scalar = chainerx.Scalar(value)
assert repr(scalar) == repr(value)
assert str(scalar) == str(value)
def test_init_invalid():
with pytest.raises(TypeError):
chainerx.Scalar("1") # string, which is not a numeric
| 27.991111
| 74
| 0.634963
| 794
| 6,298
| 4.947103
| 0.11335
| 0.015275
| 0.038187
| 0.022403
| 0.464613
| 0.352088
| 0.278259
| 0.250255
| 0.242617
| 0.197047
| 0
| 0.111222
| 0.204827
| 6,298
| 224
| 75
| 28.116071
| 0.673123
| 0.03128
| 0
| 0.137363
| 0
| 0
| 0.019209
| 0.005089
| 0
| 0
| 0.063208
| 0.004464
| 0.192308
| 1
| 0.054945
| false
| 0
| 0.016484
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a0991a62637e4100b857f9f5423321dcccd74d3
| 8,265
|
py
|
Python
|
app.py
|
Tiemoue/SnakeGame
|
69124d38227502928924cc7dc6c57b41ade5d97c
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
Tiemoue/SnakeGame
|
69124d38227502928924cc7dc6c57b41ade5d97c
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
Tiemoue/SnakeGame
|
69124d38227502928924cc7dc6c57b41ade5d97c
|
[
"Apache-2.0"
] | null | null | null |
import sys
import pygame
from app_window import App_window
from button import Button
from snake import Snake
from food import Food
from settings import WIDTH, HEIGHT, FONT, BG_COL, QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR, BLACK, FPS, RED
class App:
def __init__(self):
pygame.init()
self.clock = pygame.time.Clock()
self.window = pygame.display.set_mode((WIDTH, HEIGHT))
self.gameover = pygame.font.SysFont("Comicsansms",
90,
bold=False,
italic=True)
self.font = pygame.font.SysFont(FONT, 20, bold=1)
self.running = True
self.state = "intro"
self.intro_buttons = []
self.playing_buttons = []
self.gameover_buttons = []
self.active_buttons = self.intro_buttons
self.app_window = App_window(self)
self.snake = Snake(self)
self.food = Food(self)
self.make_buttons()
def make_buttons(self):
# INTRO PLAY AND QUIT BUTTON
intro_play_button = Button(self,
50,
300,
WIDTH - 100,
50,
PLAY_BUTTON_COLOUR,
hover_colour=(49, 218, 46),
function=self.intro_to_play,
text="PLAY")
self.intro_buttons.append(intro_play_button)
intro_quit_button = Button(self,
50,
HEIGHT - 100,
WIDTH - 100,
50,
QUIT_BUTTON_COLOUR,
hover_colour=(219, 53, 43),
function=self.intro_quit,
text="QUIT")
self.intro_buttons.append(intro_quit_button)
# PLAYING QUIT BUTTON
playing_quit_button = Button(self, (WIDTH // 2) - 50,
20,
100,
33,
QUIT_BUTTON_COLOUR,
hover_colour=(219, 53, 43),
function=self.playing_quit,
text="QUIT")
self.playing_buttons.append(playing_quit_button)
# GAMEOVER BUTTON
gameover_play_again_button = Button(self,
50,
300,
WIDTH - 100,
50,
PLAY_BUTTON_COLOUR,
hover_colour=(36, 183, 23),
function=self.reset,
text="PLAY AGAIN")
self.gameover_buttons.append(gameover_play_again_button)
gameover_quit_button = Button(self,
50,
HEIGHT - 100,
WIDTH - 100,
50,
QUIT_BUTTON_COLOUR,
hover_colour=(216, 53, 43),
function=self.intro_quit,
text="QUIT")
self.gameover_buttons.append(gameover_quit_button)
def show_text(self, text, pos):
text = self.font.render(text, False, BLACK)
self.window.blit(text, (pos[0], pos[1]))
def reset(self):
# reset the game
self.state = "play"
self.active_buttons = self.playing_buttons
self.snake = Snake(self)
FPS[0] = 5
def run(self):
while self.running:
self.events()
self.update()
self.draw()
self.clock.tick(FPS[0])
pygame.quit()
sys.exit()
def events(self):
if self.state == "intro":
self.intro_events()
if self.state == "play":
self.playing_events()
if self.state == "dead":
self.gameover_events()
def update(self):
if self.state == "intro":
self.intro_update()
if self.state == "play":
self.playing_update()
if self.state == "dead":
self.gameover_update()
def draw(self):
self.window.fill(BG_COL)
if self.state == "intro":
self.intro_draw()
if self.state == "play":
self.playing_draw()
if self.state == "dead":
self.gameover_draw()
pygame.display.update()
# INTRO FUNCTIONS
def intro_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.running = False
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.active_buttons:
if button.hovered:
button.click()
def intro_update(self):
for button in self.active_buttons:
button.update()
def intro_draw(self):
for button in self.active_buttons:
button.draw()
def intro_to_play(self):
self.state = "play"
self.active_buttons = self.playing_buttons
def intro_quit(self):
self.running = False
# PlAY FUNCTIONS
def playing_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
# checks if a key is pressed down
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.running = False
if event.key == pygame.K_LEFT and self.snake.direction != [
1, 0
]:
self.snake.direction = [-1, 0]
if event.key == pygame.K_RIGHT and self.snake.direction != [
-1, 0
]:
self.snake.direction = [1, 0]
if event.key == pygame.K_UP and self.snake.direction != [0, 1]:
self.snake.direction = [0, -1]
if event.key == pygame.K_DOWN and self.snake.direction != [
0, -1
]:
self.snake.direction = [0, 1]
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.active_buttons:
if button.hovered:
button.click()
def playing_update(self):
for button in self.active_buttons:
button.update()
self.app_window.update()
def playing_draw(self):
self.app_window.draw()
for button in self.active_buttons:
button.draw()
self.show_text("Score: " + str(self.snake.length - 1), [20, 20])
def playing_quit(self):
self.running = False
# GAMEOVER FUNCTIONS
def gameover_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.running = False
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.active_buttons:
if button.hovered:
button.click()
def gameover_update(self):
for button in self.active_buttons:
button.update()
def gameover_draw(self):
for button in self.active_buttons:
button.draw()
self.game_over("GAME OVER", [WIDTH - 440, 30])
def game_over(self, text, pos):
text = self.gameover.render(text, False, RED)
self.window.blit(text, (pos[0], pos[1]))
| 35.320513
| 105
| 0.461101
| 809
| 8,265
| 4.566131
| 0.143387
| 0.02653
| 0.055225
| 0.041419
| 0.613969
| 0.520845
| 0.471034
| 0.455333
| 0.441256
| 0.387114
| 0
| 0.027164
| 0.452148
| 8,265
| 233
| 106
| 35.472103
| 0.788648
| 0.019238
| 0
| 0.455959
| 0
| 0
| 0.012968
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108808
| false
| 0
| 0.036269
| 0
| 0.150259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a0a3ed9310efb11ad8dbed4a513b033dd037f31
| 4,697
|
py
|
Python
|
pupa/importers/bills.py
|
datamade/pupa
|
7c7d2937dfa0c8347e47661a6ed42fd28a9e16d4
|
[
"BSD-3-Clause"
] | 3
|
2015-11-21T10:39:44.000Z
|
2019-11-17T16:34:53.000Z
|
pupa/importers/bills.py
|
datamade/pupa
|
7c7d2937dfa0c8347e47661a6ed42fd28a9e16d4
|
[
"BSD-3-Clause"
] | 1
|
2015-11-23T19:43:48.000Z
|
2015-11-23T19:45:06.000Z
|
pupa/importers/bills.py
|
datamade/pupa
|
7c7d2937dfa0c8347e47661a6ed42fd28a9e16d4
|
[
"BSD-3-Clause"
] | 5
|
2015-11-22T09:23:14.000Z
|
2019-11-17T16:34:57.000Z
|
from pupa.utils import fix_bill_id
from opencivicdata.legislative.models import (Bill, RelatedBill, BillAbstract, BillTitle,
BillIdentifier, BillAction, BillActionRelatedEntity,
BillSponsorship, BillSource, BillDocument,
BillVersion, BillDocumentLink, BillVersionLink)
from .base import BaseImporter
from ..exceptions import PupaInternalError
class BillImporter(BaseImporter):
_type = 'bill'
model_class = Bill
related_models = {'abstracts': (BillAbstract, 'bill_id', {}),
'other_titles': (BillTitle, 'bill_id', {}),
'other_identifiers': (BillIdentifier, 'bill_id', {}),
'actions': (BillAction, 'bill_id', {
'related_entities': (BillActionRelatedEntity, 'action_id', {})}),
'related_bills': (RelatedBill, 'bill_id', {}),
'sponsorships': (BillSponsorship, 'bill_id', {}),
'sources': (BillSource, 'bill_id', {}),
'documents': (BillDocument, 'bill_id', {
'links': (BillDocumentLink, 'document_id', {})}),
'versions': (BillVersion, 'bill_id', {
'links': (BillVersionLink, 'version_id', {})}),
}
preserve_order = {'actions'}
def __init__(self, jurisdiction_id, org_importer, person_importer):
super(BillImporter, self).__init__(jurisdiction_id)
self.org_importer = org_importer
self.person_importer = person_importer
def get_object(self, bill):
spec = {
'legislative_session_id': bill['legislative_session_id'],
'identifier': bill['identifier'],
}
if 'from_organization_id' in bill:
spec['from_organization_id'] = bill['from_organization_id']
return self.model_class.objects.prefetch_related('actions__related_entities',
'versions__links',
'documents__links',
).get(**spec)
def limit_spec(self, spec):
spec['legislative_session__jurisdiction_id'] = self.jurisdiction_id
return spec
def prepare_for_db(self, data):
data['identifier'] = fix_bill_id(data['identifier'])
data['legislative_session_id'] = self.get_session_id(data.pop('legislative_session'))
if data['from_organization']:
data['from_organization_id'] = self.org_importer.resolve_json_id(
data.pop('from_organization'))
for action in data['actions']:
action['organization_id'] = self.org_importer.resolve_json_id(
action['organization_id'])
for entity in action['related_entities']:
if 'organization_id' in entity:
entity['organization_id'] = self.org_importer.resolve_json_id(
entity['organization_id'])
elif 'person_id' in entity:
entity['person_id'] = self.person_importer.resolve_json_id(
entity['person_id'])
for sponsor in data['sponsorships']:
if 'person_id' in sponsor:
sponsor['person_id'] = self.person_importer.resolve_json_id(
sponsor['person_id'], allow_no_match=True)
if 'organization_id' in sponsor:
sponsor['organization_id'] = self.org_importer.resolve_json_id(
sponsor['organization_id'], allow_no_match=True)
return data
def postimport(self):
# go through all RelatedBill objs that are attached to a bill in this jurisdiction and
# are currently unresolved
for rb in RelatedBill.objects.filter(
bill__legislative_session__jurisdiction_id=self.jurisdiction_id,
related_bill=None):
candidates = list(Bill.objects.filter(
legislative_session__identifier=rb.legislative_session,
legislative_session__jurisdiction_id=self.jurisdiction_id,
identifier=rb.identifier)
)
if len(candidates) == 1:
rb.related_bill = candidates[0]
rb.save()
elif len(candidates) > 1: # pragma: no cover
# if we ever see this, we need to add additional fields on the relation
raise PupaInternalError('multiple related_bill candidates found for {}'.format(rb))
| 48.42268
| 99
| 0.569087
| 434
| 4,697
| 5.852535
| 0.278802
| 0.066142
| 0.044882
| 0.049606
| 0.180315
| 0.155906
| 0.155906
| 0.09685
| 0
| 0
| 0
| 0.000958
| 0.333617
| 4,697
| 96
| 100
| 48.927083
| 0.810543
| 0.041729
| 0
| 0
| 0
| 0
| 0.179938
| 0.028247
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.2
| 0
| 0.3625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a0ae7fb6e8c16bf95848129bac7852b529505c4
| 6,799
|
py
|
Python
|
koino/plot/clusters.py
|
tritas/koino
|
21ecc30fdb76727b9b4b3cf695a39f6e860a52d6
|
[
"BSD-3-Clause"
] | null | null | null |
koino/plot/clusters.py
|
tritas/koino
|
21ecc30fdb76727b9b4b3cf695a39f6e860a52d6
|
[
"BSD-3-Clause"
] | null | null | null |
koino/plot/clusters.py
|
tritas/koino
|
21ecc30fdb76727b9b4b3cf695a39f6e860a52d6
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
import logging
import traceback
from os import makedirs
from os.path import exists, join
from textwrap import fill
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from koino.plot import big_square, default_alpha
from matplotlib import cm
from ..utils.base import jaccard
def plot_silhouette(
X, figure_fp, n_clusters, silhouette_values, cluster_labels, silhouette_avg
):
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(26, 10))
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but here all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
y_lower = 10
for k in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = np.sort(silhouette_values[cluster_labels == k])
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(k) / n_clusters)
ax1.fill_betweenx(
np.arange(y_lower, y_upper),
0,
ith_cluster_silhouette_values,
facecolor=color,
edgecolor=color,
alpha=default_alpha,
)
# Label the silhouette plots with their cluster numbers at the
# middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(k))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# Construct cluster
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
# colors = y
ax2.scatter(X[:, 0], X[:, 1], marker=".", s=20, lw=0, alpha=default_alpha, c=colors)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(
("Silhouette analysis for KMeans " "with n_clusters = %d" % n_clusters),
fontsize=14,
fontweight="bold",
)
plt.savefig(figure_fp)
plt.close()
plt.clf()
def plot_cluster_assignments(
X, y, n_clusters, figures_dir, transparent=False, cluster_names=None, title=""
):
"""Clustering assignments scatter plot
Notes
-----
Can use mean or median to fix cluster centroid coordinates."""
if cluster_names is None:
cluster_names = ["Cluster {}".format(i + 1) for i in range(n_clusters)]
# We first reorder the data points according to the centroids labels
X = np.vstack([X[y == i] for i in range(n_clusters)])
y = np.hstack([y[y == i] for i in range(n_clusters)])
# Choose a color palette with seaborn.
palette = np.array(sns.color_palette("hls", n_clusters))
fig, ax = plt.subplots(figsize=big_square)
# for i in range(n_clusters):
# mask = y == i
# ax.scatter(X[mask, 0], X[mask, 1], lw=0, s=20, c=palette[i],
# label=cluster_names[i])
ax.set_title(title)
ax.scatter(X[:, 0], X[:, 1], lw=0, s=20, c=palette[y.astype(np.int)])
ax.axis("off")
# Add the labels for each cluster.
for i in range(n_clusters):
# Position of each label.
samples = np.atleast_2d(X[y == i, :2])
if not len(samples):
logging.warning(
"Probably singular cluster {} (shape:{})".format(i + 1, X[y == i].shape)
)
continue
xtext, ytext = np.median(samples, axis=0)
name = fill(cluster_names[i], width=20)
assert np.isfinite(xtext)
assert np.isfinite(ytext)
txt = ax.text(xtext, ytext, name, fontsize=20, wrap=True, ha="left")
txt.set_path_effects(
[PathEffects.Stroke(linewidth=5, foreground="w"), PathEffects.Normal()]
)
# plt.legend()
figure_fp = join(figures_dir, "Clustered {}.png".format(title))
fig.tight_layout()
try:
fig.savefig(figure_fp, transparent=transparent)
except ValueError:
logging.warning(traceback.format_exc())
finally:
plt.close()
plt.clf()
def overlap_jaccard(
indx,
y_a,
y_b,
names_a,
names_b,
n_a=None,
n_b=None,
figsize=None,
output_dir=None,
alabel="socio-demographic",
blabel="purchases",
transparent=False,
):
"""Compute and plot contingency tables based on set intersection and
jaccard score.
# TODO: Normalize by len(sd_set) or len(diet_set)?
"""
if not (n_a or n_b) or not output_dir:
return
elif output_dir and not exists(output_dir):
makedirs(output_dir)
else:
assert n_a and n_b
assert len(indx) == len(y_a) == len(y_b)
assert len(names_a) == n_a
assert len(names_b) == n_b
a_sets = [set(indx[y_a == i]) for i in range(n_a)]
b_sets = [set(indx[y_b == i]) for i in range(n_b)]
inter_sets = np.asarray(
[[len(set_a & set_t) for set_a in a_sets] for set_t in b_sets], dtype=np.int_
)
fig, ax = plt.subplots(figsize=figsize)
plt.title("Overlap between {} and {} clusters".format(alabel, blabel))
sns.heatmap(
inter_sets,
annot=True,
fmt="6.0f",
ax=ax,
square=True,
xticklabels=names_a,
yticklabels=names_b,
)
plt.tight_layout()
inter_path = join(output_dir, "Clusters Intersection.png")
plt.savefig(inter_path, transparent=transparent)
plt.close()
plt.clf()
jac_arr = np.asarray(
[[jaccard(set_a, set_b) for set_a in a_sets] for set_b in b_sets],
dtype=np.float_,
)
fig, ax = plt.subplots(figsize=figsize)
plt.title("Jaccard scores between {} and {} clusters".format(alabel, blabel))
sns.heatmap(
jac_arr,
annot=True,
fmt=".3f",
ax=ax,
square=True,
xticklabels=names_a,
yticklabels=names_b,
)
plt.tight_layout()
jaccard_path = join(output_dir, "Clusters Jaccard.png")
plt.savefig(jaccard_path, transparent=transparent)
plt.close()
plt.clf()
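def _jaccard_example():
    # Worked example for overlap_jaccard above (hypothetical sets), assuming the
    # imported jaccard(a, b) implements |a & b| / |a | b|, the usual definition.
    a, b = {'x', 'y', 'z'}, {'y', 'z', 'w'}
    assert len(a & b) / len(a | b) == 0.5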
| 31.188073
| 88
| 0.633034
| 982
| 6,799
| 4.237271
| 0.280041
| 0.032444
| 0.015381
| 0.018505
| 0.174958
| 0.137467
| 0.116799
| 0.090363
| 0.029801
| 0.029801
| 0
| 0.019639
| 0.251066
| 6,799
| 217
| 89
| 31.331797
| 0.797526
| 0.181644
| 0
| 0.176471
| 0
| 0
| 0.088513
| 0
| 0
| 0
| 0
| 0.004608
| 0.039216
| 1
| 0.019608
| false
| 0
| 0.078431
| 0
| 0.104575
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a0fca50d08846d8ef07b169b960d9c55f0826dc
| 3,504
|
py
|
Python
|
esppy/windows/score.py
|
PetreStegaroiu/python-esppy
|
d43781e94ad9236916901eeb3737d0b1b18d797a
|
[
"Apache-2.0"
] | null | null | null |
esppy/windows/score.py
|
PetreStegaroiu/python-esppy
|
d43781e94ad9236916901eeb3737d0b1b18d797a
|
[
"Apache-2.0"
] | null | null | null |
esppy/windows/score.py
|
PetreStegaroiu/python-esppy
|
d43781e94ad9236916901eeb3737d0b1b18d797a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import pandas as pd
import six
from .base import BaseWindow, attribute
from .features import SchemaFeature, ModelsFeature, ConnectorsFeature
from .utils import get_args, ensure_element
class ScoreWindow(BaseWindow, SchemaFeature, ModelsFeature, ConnectorsFeature):
'''
Score window
Parameters
----------
name : string, optional
The name of the window
schema : Schema, optional
The schema of the window
pubsub : bool, optional
Publish/subscribe mode for the window. When the project-level
value of pubsub is manual, true enables publishing and subscribing
for the window and false disables it.
description : string, optional
Description of the window
Attributes
----------
online_models : list-of-OnlineModels
List of online model objects
offline_models : list-of-OfflineModels
List of offline model objects
Returns
-------
:class:`ScoreWindow`
'''
window_type = 'score'
def __init__(self, name=None, schema=None, pubsub=None, description=None,
copyvars=None):
BaseWindow.__init__(self, **get_args(locals()))
# Set the online model for subclasses
if type(self).__name__ != 'ScoreWindow':
self.add_online_model(type(self).__name__)
def _create_schema_list(self, variables):
'''
Extract schema information from DataFrame
Parameters
----------
variables : DataFrame
The DataFrame containing schema information
Returns
-------
list
'''
labels = []
labels.append('id*:int64')
for name, dtype in zip(variables['Name'], variables['Type']):
if dtype == 'Num':
labels.append(name + ':double')
elif dtype == 'Char':
labels.append(name + ':string')
return labels
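# Illustrative example (values invented, not from SAS documentation): a variables
# DataFrame with Name=['x1', 'label'] and Type=['Num', 'Char'] would yield
# ['id*:int64', 'x1:double', 'label:string'].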
def import_schema_from_astore_output(self, output_variables_input):
'''
Import a schema from the astore CAS action output format
Parameters
----------
output_variables_input : DataFrame or list or string
The schema definition
'''
if isinstance(output_variables_input, six.string_types):
if os.path.isfile(output_variables_input):
output_variables_input = pd.read_csv(output_variables_input)
else:
output_variables_input = pd.read_csv(six.StringIO(output_variables_input))
if isinstance(output_variables_input, pd.DataFrame):
self.schema = self._create_schema_list(output_variables_input)
elif isinstance(output_variables_input, (tuple, list)):
self.schema = list(output_variables_input)
| 31.854545
| 90
| 0.656393
| 406
| 3,504
| 5.497537
| 0.431034
| 0.080645
| 0.107527
| 0.040323
| 0.081541
| 0.025986
| 0
| 0
| 0
| 0
| 0
| 0.002695
| 0.258847
| 3,504
| 109
| 91
| 32.146789
| 0.856758
| 0.442637
| 0
| 0
| 0
| 0
| 0.031783
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.242424
| 0
| 0.424242
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a10152195fb9a20741a86fb44035860fed300f4
| 12,017
|
py
|
Python
|
Packs/Pwned/Integrations/PwnedV2/PwnedV2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799
|
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Pwned/Integrations/PwnedV2/PwnedV2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317
|
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Pwned/Integrations/PwnedV2/PwnedV2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297
|
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from CommonServerPython import *
''' IMPORTS '''
import re
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
VENDOR = 'Have I Been Pwned? V2'
MAX_RETRY_ALLOWED = demisto.params().get('max_retry_time', -1)
API_KEY = demisto.params().get('api_key')
USE_SSL = not demisto.params().get('insecure', False)
BASE_URL = 'https://haveibeenpwned.com/api/v3'
HEADERS = {
'hibp-api-key': API_KEY,
'user-agent': 'DBOT-API',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
DEFAULT_DBOT_SCORE_EMAIL = 2 if demisto.params().get('default_dbot_score_email') == 'SUSPICIOUS' else 3
DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3
SUFFIXES = {
"email": '/breachedaccount/',
"domain": '/breaches?domain=',
"username": '/breachedaccount/',
"paste": '/pasteaccount/',
"email_truncate_verified": '?truncateResponse=false&includeUnverified=true',
"domain_truncate_verified": '&truncateResponse=false&includeUnverified=true',
"username_truncate_verified": '?truncateResponse=false&includeUnverified=true'
}
RETRIES_END_TIME = datetime.min
''' HELPER FUNCTIONS '''
def http_request(method, url_suffix, params=None, data=None):
while True:
res = requests.request(
method,
BASE_URL + url_suffix,
verify=USE_SSL,
params=params,
data=data,
headers=HEADERS
)
if res.status_code != 429:
# Rate limit response code
break
if datetime.now() > RETRIES_END_TIME:
return_error('Max retry time has exceeded.')
wait_regex = re.search(r'\d+', res.json()['message'])
if wait_regex:
wait_amount = wait_regex.group()
else:
demisto.error('Failed to extract wait time; using default (5). Res body: {}'.format(res.text))
wait_amount = 5
if datetime.now() + timedelta(seconds=int(wait_amount)) > RETRIES_END_TIME:
return_error('Max retry time has exceeded.')
time.sleep(int(wait_amount))
if res.status_code == 404:
return None
if not res.status_code == 200:
if not res.status_code == 401:
demisto.error(
'Error in API call to Pwned Integration [%d]. Full text: %s' % (res.status_code, res.text))
return_error('Error in API call to Pwned Integration [%d] - %s' % (res.status_code, res.reason))
return None
return res.json()
def html_description_to_human_readable(breach_description):
"""
Convert an HTML breach description to a human-readable string
:param breach_description: Description of breach from API response
:return: Description string with HTML links converted to clickable markdown links
for better readability in the war room
"""
html_link_pattern = re.compile('<a href="(.+?)"(.+?)>(.+?)</a>')
patterns_found = html_link_pattern.findall(breach_description)
for link in patterns_found:
html_actual_address = link[0]
html_readable_name = link[2]
link_from_desc = '[' + html_readable_name + ']' + '(' + html_actual_address + ')'
breach_description = re.sub(html_link_pattern, link_from_desc, breach_description, count=1)
return breach_description
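# Illustrative example (made-up breach text) of the conversion performed above:
#   '<a href="https://example.com" target="_blank">Example</a> was breached'
#   becomes '[Example](https://example.com) was breached'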
def data_to_markdown(query_type, query_arg, api_res, api_paste_res=None):
records_found = False
md = '### Have I Been Pwned query for ' + query_type.lower() + ': *' + query_arg + '*\n'
if api_res:
records_found = True
for breach in api_res:
verified_breach = 'Verified' if breach['IsVerified'] else 'Unverified'
md += '#### ' + breach['Title'] + ' (' + breach['Domain'] + '): ' + str(breach['PwnCount']) + \
' records breached [' + verified_breach + ' breach]\n'
md += 'Date: **' + breach['BreachDate'] + '**\n\n'
md += html_description_to_human_readable(breach['Description']) + '\n'
md += 'Data breached: **' + ','.join(breach['DataClasses']) + '**\n'
if api_paste_res:
records_found = True
pastes_list = []
for paste_breach in api_paste_res:
paste_entry = \
{
'Source': paste_breach['Source'],
'Title': paste_breach['Title'],
'ID': paste_breach['Id'],
'Date': '',
'Amount of emails in paste': str(paste_breach['EmailCount'])
}
if paste_breach['Date']:
paste_entry['Date'] = paste_breach['Date'].split('T')[0]
pastes_list.append(paste_entry)
md += tableToMarkdown('The email address was found in the following "Pastes":',
pastes_list,
['ID', 'Title', 'Date', 'Source', 'Amount of emails in paste'])
if not records_found:
md += 'No records found'
return md
def create_dbot_score_dictionary(indicator_value, indicator_type, dbot_score):
return {
'Indicator': indicator_value,
'Type': indicator_type,
'Vendor': VENDOR,
'Score': dbot_score
}
def create_context_entry(context_type, context_main_value, comp_sites, comp_pastes, malicious_score):
context_dict = dict()  # type: dict
if context_type == 'email':
context_dict['Address'] = context_main_value
else:
context_dict['Name'] = context_main_value
context_dict['Pwned-V2'] = {
'Compromised': {
'Vendor': VENDOR,
'Reporters': ', '.join(comp_sites + comp_pastes)
}
}
if malicious_score == 3:
context_dict['Malicious'] = add_malicious_to_context(context_type)
return context_dict
def add_malicious_to_context(malicious_type):
return {
'Vendor': VENDOR,
'Description': 'The ' + malicious_type + ' has been compromised'
}
def email_to_entry_context(email, api_email_res, api_paste_res):
dbot_score = 0
comp_email = dict() # type: dict
comp_sites = sorted([item['Title'] for item in api_email_res])
comp_pastes = sorted(set(item['Source'] for item in api_paste_res))
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_EMAIL
email_context = create_context_entry('email', email, comp_sites, comp_pastes, DEFAULT_DBOT_SCORE_EMAIL)
comp_email[outputPaths['email']] = email_context
comp_email['DBotScore'] = create_dbot_score_dictionary(email, 'email', dbot_score)
return comp_email
def domain_to_entry_context(domain, api_res):
comp_sites = [item['Title'] for item in api_res]
comp_sites = sorted(comp_sites)
comp_domain = dict() # type: dict
dbot_score = 0
if len(comp_sites) > 0:
dbot_score = DEFAULT_DBOT_SCORE_DOMAIN
domain_context = create_context_entry('domain', domain, comp_sites, [], DEFAULT_DBOT_SCORE_DOMAIN)
comp_domain[outputPaths['domain']] = domain_context
comp_domain['DBotScore'] = create_dbot_score_dictionary(domain, 'domain', dbot_score)
return comp_domain
def set_retry_end_time():
global RETRIES_END_TIME
if MAX_RETRY_ALLOWED != -1:
RETRIES_END_TIME = datetime.now() + timedelta(seconds=int(MAX_RETRY_ALLOWED))
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module(args_dict):
"""
If the http request was successful the test will return OK
:return: 3 arrays of outputs
"""
http_request('GET', SUFFIXES.get("username", '') + 'test')
return ['ok'], [None], [None]
def pwned_email_command(args_dict):
"""
Executes the pwned request for a list of emails; to support list input, the function returns 3 lists of outputs
:param args_dict: the demisto arguments - in this case the email list is needed
:return: 3 arrays of outputs
"""
email_list = argToList(args_dict.get('email', ''))
api_email_res_list, api_paste_res_list = pwned_email(email_list)
md_list = []
ec_list = []
for email, api_email_res, api_paste_res in zip(email_list, api_email_res_list, api_paste_res_list):
md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res))
ec_list.append(email_to_entry_context(email, api_email_res or [], api_paste_res or []))
return md_list, ec_list, api_email_res_list
def pwned_email(email_list):
"""
Executing the http requests
:param email_list: the email list needed for the HTTP requests
:return: 2 arrays of http requests outputs
"""
api_email_res_list = []
api_paste_res_list = []
for email in email_list:
email_suffix = SUFFIXES.get("email") + email + SUFFIXES.get("email_truncate_verified")
paste_suffix = SUFFIXES.get("paste") + email
api_email_res_list.append(http_request('GET', url_suffix=email_suffix))
api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix))
return api_email_res_list, api_paste_res_list
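# Minimal sketch of the URL assembly (the SUFFIXES values below are assumptions for
# illustration, not taken from this file): if SUFFIXES were
# {"email": "/breachedaccount/", "email_truncate_verified": "?truncateResponse=false",
#  "paste": "/pasteaccount/"}, then for '[email protected]' the two requests above would use
# url_suffix '/breachedaccount/[email protected]?truncateResponse=false' and
# url_suffix '/pasteaccount/[email protected]'.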
def pwned_domain_command(args_dict):
"""
Executes the pwned request for a list of domains. To support list input, the function returns three lists of
outputs.
:param args_dict: the demisto arguments - in this case the domain list is needed
:return: 3 arrays of outputs
"""
domain_list = argToList(args_dict.get('domain', ''))
api_res_list = pwned_domain(domain_list)
md_list = []
ec_list = []
for domain, api_res in zip(domain_list, api_res_list):
md_list.append(data_to_markdown('Domain', domain, api_res))
ec_list.append(domain_to_entry_context(domain, api_res or []))
return md_list, ec_list, api_res_list
def pwned_domain(domain_list):
"""
Executes the HTTP requests
:param domain_list: the domain list needed for the HTTP requests
:return: an array of http requests outputs
"""
api_res_list = []
for domain in domain_list:
suffix = SUFFIXES.get("domain") + domain + SUFFIXES.get("domain_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
def pwned_username_command(args_dict):
"""
Executes the pwned request for a list of usernames. To support list input, the function returns three lists of
outputs.
:param args_dict: the demisto arguments - in this case the username list is needed
:return: 3 arrays of outputs
"""
username_list = argToList(args_dict.get('username', ''))
api_res_list = pwned_username(username_list)
md_list = []
ec_list = []
for username, api_res in zip(username_list, api_res_list):
md_list.append(data_to_markdown('Username', username, api_res))
ec_list.append(domain_to_entry_context(username, api_res or []))
return md_list, ec_list, api_res_list
def pwned_username(username_list):
"""
Executes the HTTP requests
:param username_list: the username list needed for the HTTP requests
:return: an array of http requests outputs
"""
api_res_list = []
for username in username_list:
suffix = SUFFIXES.get("username") + username + SUFFIXES.get("username_truncate_verified")
api_res_list.append(http_request('GET', url_suffix=suffix))
return api_res_list
command = demisto.command()
LOG('Command being called is: {}'.format(command))
try:
handle_proxy()
set_retry_end_time()
commands = {
'test-module': test_module,
'email': pwned_email_command,
'pwned-email': pwned_email_command,
'domain': pwned_domain_command,
'pwned-domain': pwned_domain_command,
'pwned-username': pwned_username_command
}
if command in commands:
md_list, ec_list, api_email_res_list = commands[command](demisto.args())
for md, ec, api_paste_res in zip(md_list, ec_list, api_email_res_list):
return_outputs(md, ec, api_paste_res)
# Log exceptions
except Exception as e:
return_error(str(e))
| 34.042493
| 120
| 0.659732
| 1,549
| 12,017
| 4.834732
| 0.16204
| 0.023368
| 0.022032
| 0.016024
| 0.386567
| 0.32047
| 0.272266
| 0.245961
| 0.150354
| 0.130057
| 0
| 0.004422
| 0.228426
| 12,017
| 352
| 121
| 34.139205
| 0.803279
| 0.124906
| 0
| 0.138393
| 0
| 0
| 0.159155
| 0.035194
| 0.008929
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.013393
| 0.008929
| 0.160714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a1109b1ce78a5e3058c1f4aa17021228f40ef11
| 817
|
py
|
Python
|
moshmosh/extensions/pipelines.py
|
Aloxaf/moshmosh
|
0cef4e3e574adabc7821a657bceba1254ca20f99
|
[
"MIT"
] | 114
|
2019-07-12T19:00:20.000Z
|
2021-12-02T17:28:36.000Z
|
moshmosh/extensions/pipelines.py
|
Aloxaf/moshmosh
|
0cef4e3e574adabc7821a657bceba1254ca20f99
|
[
"MIT"
] | 19
|
2019-07-12T18:34:59.000Z
|
2022-01-01T03:37:03.000Z
|
moshmosh/extensions/pipelines.py
|
Aloxaf/moshmosh
|
0cef4e3e574adabc7821a657bceba1254ca20f99
|
[
"MIT"
] | 7
|
2019-07-14T23:15:44.000Z
|
2021-12-27T21:15:17.000Z
|
from moshmosh.extension import Extension
from moshmosh.ast_compat import ast
class PipelineVisitor(ast.NodeTransformer):
"""
`a | f -> f(a)`, recursively
"""
def __init__(self, activation):
self.activation = activation
def visit_BinOp(self, n: ast.BinOp):
if n.lineno in self.activation and isinstance(n.op, ast.BitOr):
return ast.Call(
self.visit(n.right),
[self.visit(n.left)],
[],
lineno=n.lineno,
col_offset=n.col_offset
)
return self.generic_visit(n)
class Pipeline(Extension):
identifier = "pipeline"
def __init__(self):
self.visitor = PipelineVisitor(self.activation)
def rewrite_ast(self, node):
return self.visitor.visit(node)
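# Hedged usage sketch (user-defined callables, not part of moshmosh): on an activated line,
# the visitor rewrites BitOr chains into nested calls, left-associatively, e.g.
#     result = 1 | double | increment
# becomes
#     result = increment(double(1))
# assuming `double` and `increment` are ordinary functions in scope.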
| 27.233333
| 71
| 0.597307
| 93
| 817
| 5.096774
| 0.408602
| 0.118143
| 0.046414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.294982
| 817
| 29
| 72
| 28.172414
| 0.822917
| 0.034272
| 0
| 0
| 0
| 0
| 0.010349
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.190476
| false
| 0
| 0.095238
| 0.047619
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a1121422d09eb0d72dfd59abaf853f521226d5b
| 3,641
|
py
|
Python
|
postpatch.py
|
mr-ma/basic-self-checksumming
|
ce3a0306fd96cc54476266bbf612d54201d2b46a
|
[
"MIT"
] | 1
|
2020-11-25T21:54:28.000Z
|
2020-11-25T21:54:28.000Z
|
postpatch.py
|
mr-ma/basic-self-checksumming
|
ce3a0306fd96cc54476266bbf612d54201d2b46a
|
[
"MIT"
] | null | null | null |
postpatch.py
|
mr-ma/basic-self-checksumming
|
ce3a0306fd96cc54476266bbf612d54201d2b46a
|
[
"MIT"
] | null | null | null |
import argparse
import os
import r2pipe
import struct
import mmap
import base64
from shutil import copyfile
import pprint
pp = pprint.PrettyPrinter(indent=4)
def precompute_hash(r2, offset, size):
print('Precomputing hash')
h = 0
print("r2 command to get the function body in base64:\np6e {}@{}".format(size, offset))
b64_func = r2.cmd("p6e {}@{}".format(size, offset))
func_bytes = bytearray(base64.b64decode(b64_func))
for b in func_bytes:
h = h ^ b
print('Precomputed hash:', hex(h))
return h
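# Worked example (illustrative only): XOR-folding a hypothetical three-byte function body
# 0x41, 0x42, 0x43 gives 0x41 ^ 0x42 = 0x03 and 0x03 ^ 0x43 = 0x40, so precompute_hash
# would print and return 0x40.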
def patch_binary(mm, search_value, patch_value):
print("search value:{} patch value:{}".format(search_value, patch_value))
flag = "<I" # little-endian unsigned int
search_bytes = struct.pack(flag, search_value)
address = mm.find(search_bytes)
if address == -1:
mm.seek(0)
address = mm.find(search_bytes)
mm.seek(address, os.SEEK_SET)
patch_bytes = struct.pack(flag, patch_value)
mm.write(patch_bytes)
def get_protected_function_info(r2, function):
# find addresses and sizes of all functions
r2.cmd("aa")
r2.cmd("aac")
function_list = r2.cmdj("aflj")
# print(function_list)
funcs = {}
for func in function_list:
attr = {'size': func['size'], 'offset': func['offset']}
funcs[func['name']] = attr
# Basic search for mangled names
if function == 'main':
# main function is entry0 in the binary
function = 'entry0'
print("Cannot precompute the expected hash for the main function, why is that?")
exit(1)
match = 0
mangledName = ""
for name, attr in funcs.items():
# sometimes r2 prepends sym. to function names
if function in name:
mangledName = name
match += 1
if match != 1:
print("Failed to safely find function in the binary!")
pp.pprint(funcs)
exit(1)
return funcs[mangledName]
def main():
parser = argparse.ArgumentParser(
description='Postpatch protected C program.')
parser.add_argument('-b', action="store", dest="binary",
help="program.out protected program binary", required=True)
parser.add_argument('-f', action="store", dest="function",
help="protected function name", required=True)
parser.add_argument('-p', nargs="+", dest="placeholders",
help="list of used placeholders in the exact order of function, size, expected hash", required=True)
results = parser.parse_args()
print("python protect program", results)
r2 = r2pipe.open(results.binary)
funcInfo = get_protected_function_info(r2, results.function)
funcOffset = funcInfo["offset"]
funcSize = funcInfo["size"]
funcExpectedHash = precompute_hash(r2, funcOffset, funcSize)
print("funcOffset:{} funcSize:{} funcExpectedHash:{}".format(
funcOffset, funcSize, funcExpectedHash))
binaryFile, _ = os.path.splitext(results.binary)
patchedBinary = "{}-patched.out".format(binaryFile)
copyfile(results.binary, patchedBinary)
with open(patchedBinary, 'r+b') as binary:
mm = mmap.mmap(binary.fileno(), 0)
patch_binary(mm, int(results.placeholders[0]), int(funcSize))
patch_binary(mm, int(results.placeholders[1]), int(funcExpectedHash))
print("Successfully stored patched binary {}".format(patchedBinary))
status = os.system(
"chmod +x {}".format(patchedBinary))
if status != 0:
print("Error in setting permission, try:\n sudo chmod +x {}".format(patchedBinary))
exit(1)
if __name__ == '__main__':
main()
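# Hedged usage sketch (all values are placeholders, not taken from the repository docs):
#   python postpatch.py -b program.out -f compute -p 11111111 22222222
# main() patches the first placeholder with the function size and the second with the
# precomputed expected hash (see the two patch_binary() calls above).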
| 35.009615
| 124
| 0.649272
| 449
| 3,641
| 5.169265
| 0.340757
| 0.013787
| 0.016803
| 0.027143
| 0.098234
| 0.030159
| 0
| 0
| 0
| 0
| 0
| 0.01527
| 0.226586
| 3,641
| 103
| 125
| 35.349515
| 0.808949
| 0.055754
| 0
| 0.05814
| 0
| 0
| 0.204314
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.093023
| 0
| 0.162791
| 0.151163
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a12c052ef27cc1782214e2d795d2be846ea918a
| 6,420
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/community/azure/plugins/modules/azure_rm_availabilityset_info.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/azure/plugins/modules/azure_rm_availabilityset_info.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/azure/plugins/modules/azure_rm_availabilityset_info.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Julien Stroheker <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_availabilityset_info
short_description: Get Azure Availability Set facts
description:
- Get facts for a specific availability set or all availability sets.
options:
name:
description:
- Limit results to a specific availability set.
resource_group:
description:
- The resource group to search for the desired availability set.
tags:
description:
- List of tags to be matched.
extends_documentation_fragment:
- azure.azcollection.azure
author:
- Julien Stroheker (@julienstroheker)
deprecated:
removed_in: '2.0.0'
why: The Ansible collection community.azure is deprecated. Use azure.azcollection instead.
alternative: Use M(azure.azcollection.azure_rm_availabilityset_info) instead.
'''
EXAMPLES = '''
- name: Get facts for one availability set
community.azure.azure_rm_availabilityset_info:
name: Testing
resource_group: myResourceGroup
- name: Get facts for all availability sets in a specific resource group
community.azure.azure_rm_availabilityset_info:
resource_group: myResourceGroup
'''
RETURN = '''
azure_availabilityset:
description: List of availability sets dicts.
returned: always
type: complex
contains:
location:
description:
- Location where the resource lives.
type: str
sample: eastus2
name:
description:
- Resource name.
type: str
sample: myAvailabilitySet
properties:
description:
- The properties of the resource.
type: dict
contains:
platformFaultDomainCount:
description:
- Fault Domain count.
type: int
sample: 3
platformUpdateDomainCount:
description:
- Update Domain count.
type: int
sample: 2
virtualMachines:
description:
- A list of references to all virtualmachines in the availability set.
type: list
sample: []
sku:
description:
- Location where the resource lives.
type: str
sample: Aligned
type:
description:
- Resource type.
type: str
sample: "Microsoft.Compute/availabilitySets"
tags:
description:
- Resource tags.
type: dict
sample: { env: sandbox }
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# handled in azure_rm_common
pass
AZURE_OBJECT_CLASS = 'AvailabilitySet'
class AzureRMAvailabilitySetInfo(AzureRMModuleBase):
"""Utility class to get availability set facts"""
def __init__(self):
self.module_args = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_info=dict(
azure_availabilitysets=[]
)
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMAvailabilitySetInfo, self).__init__(
derived_arg_spec=self.module_args,
supports_tags=False,
facts_module=True
)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_availabilityset_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_availabilityset_facts' module has been renamed to 'azure_rm_availabilityset_info'",
version='3.0.0', collection_name='community.azure') # was 2.13
for key in self.module_args:
setattr(self, key, kwargs[key])
if self.name and not self.resource_group:
self.fail("Parameter error: resource group required when filtering by name.")
if self.name:
self.results['ansible_info']['azure_availabilitysets'] = self.get_item()
else:
self.results['ansible_info']['azure_availabilitysets'] = self.list_items()
return self.results
def get_item(self):
"""Get a single availability set"""
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.compute_client.availability_sets.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
avase['name'] = item.name
avase['type'] = item.type
avase['sku'] = item.sku.name
result = [avase]
return result
def list_items(self):
"""Get all availability sets"""
self.log('List all availability sets')
try:
response = self.compute_client.availability_sets.list(self.resource_group)
except CloudError as exc:
self.fail('Failed to list all items - {0}'.format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
avase['name'] = item.name
avase['type'] = item.type
avase['sku'] = item.sku.name
results.append(avase)
return results
def main():
"""Main module execution code path"""
AzureRMAvailabilitySetInfo()
if __name__ == '__main__':
main()
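# Illustrative note (sketch, not an additional endpoint or task): a successful run populates
# self.results as built in __init__/exec_module, roughly
#   {'changed': False, 'ansible_info': {'azure_availabilitysets': [...serialized sets...]}}
# with one serialized dict per availability set matching the optional name/tags filters.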
| 28.789238
| 132
| 0.588006
| 657
| 6,420
| 5.581431
| 0.304414
| 0.038996
| 0.041996
| 0.035451
| 0.184347
| 0.142351
| 0.120535
| 0.0949
| 0.0949
| 0.065994
| 0
| 0.005796
| 0.328193
| 6,420
| 222
| 133
| 28.918919
| 0.844424
| 0.055919
| 0
| 0.295181
| 0
| 0
| 0.522121
| 0.078376
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03012
| false
| 0.012048
| 0.018072
| 0
| 0.072289
| 0.006024
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a1466d8bab50ddcdbbd51b7ac94f3df778f4c3c
| 40,433
|
py
|
Python
|
tests/v3_api/common.py
|
sowmyav27/rancher
|
a277d958cfcafca22f5da26b3a4582edd9cfd2af
|
[
"Apache-2.0"
] | null | null | null |
tests/v3_api/common.py
|
sowmyav27/rancher
|
a277d958cfcafca22f5da26b3a4582edd9cfd2af
|
[
"Apache-2.0"
] | null | null | null |
tests/v3_api/common.py
|
sowmyav27/rancher
|
a277d958cfcafca22f5da26b3a4582edd9cfd2af
|
[
"Apache-2.0"
] | null | null | null |
import inspect
import json
import os
import random
import subprocess
import time
import requests
import ast
import paramiko
import rancher
from rancher import ApiError
from lib.aws import AmazonWebServices
DEFAULT_TIMEOUT = 120
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "http://localhost:80")
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
TEST_IMAGE = "sangeetha/mytestcontainer"
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
CLUSTER_NAME_2 = ""
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def random_int(start, end):
return random.randint(start, end)
def random_test_name(name="test"):
return name + "-" + str(random_int(10000, 99999))
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_client_for_token(token):
return rancher.Client(url=CATTLE_API_URL, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
start = time.time()
ret = callback()
while ret is None or ret is False:
time.sleep(.5)
if time.time() - start > timeout:
if timeout_message:
raise Exception(timeout_message)
else:
raise Exception('Timeout waiting for condition')
ret = callback()
return ret
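# Minimal usage sketch (hypothetical objects): wait_state() above is a thin wrapper over this,
# and a direct call looks like
#   wait_for(lambda: client.reload(workload).state == "active",
#            timeout=DEFAULT_TIMEOUT, timeout_message="workload never became active")
# which polls every 0.5 seconds and raises once the timeout is exceeded.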
def random_name():
return "test" + "-" + str(random_int(10000, 99999))
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
client = get_client_for_token(token)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(kube_fname, "w")
file.write(generateKubeConfigOutput.config)
file.close()
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == pod_count
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == pod_count
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= pod_count
return
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
return pods_result["items"]
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
execute_kubectl_cmd(get_pods)
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False):
command = 'kubectl --kubeconfig {0} {1}'.format(
kube_fname, cmd)
if json_out:
command += ' -o json'
if stderr:
result = run_command_with_stderr(command)
else:
result = run_command(command)
if json_out:
result = json.loads(result)
print(result)
return result
def run_command(command):
return subprocess.check_output(command, shell=True, text=True)
def run_command_with_stderr(command):
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.output
returncode = e.returncode
print(returncode)
return (output, returncode)
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
state="error"):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.transitioning != state:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
while p.state != "running":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
return p
def get_schedulable_nodes(cluster):
client = get_admin_client()
nodes = client.list_node(clusterId=cluster.id).data
schedulable_nodes = []
for node in nodes:
if node.worker:
schedulable_nodes.append(node)
return schedulable_nodes
def get_role_nodes(cluster, role):
etcd_nodes = []
control_nodes = []
worker_nodes = []
node_list = []
client = get_admin_client()
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
if node.controlPlane:
control_nodes.append(node)
if node.worker:
worker_nodes.append(node)
if role == "etcd":
node_list = etcd_nodes
if role == "control":
node_list = control_nodes
if role == "worker":
node_list = worker_nodes
return node_list
def validate_ingress(p_client, cluster, workloads, host, path,
insecure_redirect=False):
time.sleep(10)
curl_args = " "
if (insecure_redirect):
curl_args = " -L --insecure "
if len(host) > 0:
curl_args += " --header 'Host: " + host + "'"
nodes = get_schedulable_nodes(cluster)
target_name_list = get_target_names(p_client, workloads)
for node in nodes:
host_ip = node.externalIpAddress
cmd = curl_args + " http://" + host_ip + path
validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
timeout=300):
target_name_list = get_target_names(p_client, workloads)
start = time.time()
fqdn_available = False
url = None
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
if public_endpoint["hostname"].startswith(ingress.name):
fqdn_available = True
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
if "path" in public_endpoint.keys():
url += public_endpoint["path"]
time.sleep(10)
validate_http_response(url, target_name_list)
def get_target_names(p_client, workloads):
pods = []
for workload in workloads:
pod_list = p_client.list_pod(workloadId=workload.id).data
pods.extend(pod_list)
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
fqdn_available = False
url = ""
start = time.time()
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
workload_list = p_client.list_workload(uuid=workload.uuid).data
assert len(workload_list) == 1
workload = workload_list[0]
if hasattr(workload, 'publicEndpoints'):
assert len(workload.publicEndpoints) > 0
url = "http://"
url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
url = url + str(workload.publicEndpoints[0]["port"])
fqdn_available = True
return url
def wait_until_lb_is_active(url, timeout=300):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for LB to become active')
return
def check_for_no_access(url):
try:
requests.get(url)
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return True
def validate_http_response(cmd, target_name_list, client_pod=None):
target_hit_list = target_name_list[:]
count = 5 * len(target_name_list)
for i in range(1, count):
if len(target_hit_list) == 0:
break
if client_pod is None:
curl_cmd = "curl " + cmd
result = run_command(curl_cmd)
else:
wget_cmd = "wget -qO- " + cmd
result = kubectl_pod_exec(client_pod, wget_cmd)
result = result.decode()
result = result.rstrip()
print("cmd: \t" + cmd)
print("result: \t" + result)
assert result in target_name_list
if result in target_hit_list:
target_hit_list.remove(result)
print("After removing all, the rest is: ", target_hit_list)
assert len(target_hit_list) == 0
def validate_cluster(client, cluster, intermediate_state="provisioning",
check_intermediate_state=True, skipIngresscheck=True,
nodes_not_in_active_state=[], k8s_version=""):
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonset
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd")))
project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
p_client = get_project_client_for_token(project, ADMIN_TOKEN)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
if not skipIngresscheck:
host = "test" + str(random_int(10000, 99999)) + ".com"
path = "/name.html"
rule = {"host": host,
"paths":
[{"workloadIds": [workload.id], "targetPort": "80"}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
validate_ingress(p_client, cluster, [workload], host, path)
return cluster
def check_cluster_version(cluster, version):
cluster_k8s_version = \
cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
assert cluster_k8s_version == version, \
"cluster_k8s_version: " + cluster_k8s_version + \
" Expected: " + version
expected_k8s_version = version[:version.find("-")]
k8s_version = execute_kubectl_cmd("version")
kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
assert kubectl_k8s_version == expected_k8s_version, \
"kubectl version: " + kubectl_k8s_version + \
" Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
css_resp = execute_kubectl_cmd("get cs")
css = css_resp["items"]
components = ["scheduler", "controller-manager"]
for i in range(0, etcd_count):
components.append("etcd-" + str(i))
print("components to check - " + str(components))
for cs in css:
component_name = cs["metadata"]["name"]
assert component_name in components
components.remove(component_name)
assert cs["conditions"][0]["status"] == "True"
assert cs["conditions"][0]["type"] == "Healthy"
assert len(components) == 0
def validate_dns_record(pod, record, expected):
# requires pod with `dig` available - TEST_IMAGE
host = '{0}.{1}.svc.cluster.local'.format(
record["name"], record["namespaceId"])
validate_dns_entry(pod, host, expected)
def validate_dns_entry(pod, host, expected):
# requires pod with `dig` available - TEST_IMAGE
cmd = 'ping -c 1 -W 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
assert ping_validation_pass is True
assert " 0% packet loss" in str(ping_output)
dig_cmd = 'dig {0} +short'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
for expected_value in expected:
assert expected_value in str(dig_output)
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
retry_count=0):
nodes = client.list_node(clusterId=cluster.id).data
node_auto_deleted = False
for node in nodes:
if node.requestedHostname not in exception_list:
node = wait_for_node_status(client, node, "active")
if node is None:
print("Need to re-evalauate new node list")
node_auto_deleted = True
retry_count += 1
print("Retry Count:" + str(retry_count))
if node_auto_deleted and retry_count < 5:
wait_for_nodes_to_become_active(client, cluster, exception_list,
retry_count)
def wait_for_node_status(client, node, state):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
# Handle the case of nodes getting auto deleted when they are part of
# nodepools
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
while node_status != state:
if time.time() - start > MACHINE_TIMEOUT:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
while node_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
timeout=300):
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
while node_count != expected_node_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
allowed_roles = ["etcd", "worker", "controlplane"]
cluster_tokens = client.list_cluster_registration_token(
clusterId=cluster.id).data
if len(cluster_tokens) > 0:
cluster_token = cluster_tokens[0]
else:
cluster_token = create_custom_host_registration_token(client, cluster)
cmd = cluster_token.nodeCommand
for role in roles:
assert role in allowed_roles
cmd += " --" + role
additional_options = " --address " + node.public_ip_address + \
" --internal-address " + node.private_ip_address
cmd += additional_options
return cmd
def create_custom_host_registration_token(client, cluster):
cluster_token = client.create_cluster_registration_token(
clusterId=cluster.id)
cluster_token = client.wait_success(cluster_token)
assert cluster_token.state == 'active'
return cluster_token
def get_cluster_type(client, cluster):
cluster_configs = [
"amazonElasticContainerServiceConfig",
"azureKubernetesServiceConfig",
"googleKubernetesEngineConfig",
"rancherKubernetesEngineConfig"
]
if "rancherKubernetesEngineConfig" in cluster:
nodes = client.list_node(clusterId=cluster.id).data
if len(nodes) > 0:
if nodes[0].nodeTemplateId is None:
return "Custom"
for cluster_config in cluster_configs:
if cluster_config in cluster:
return cluster_config
return "Imported"
def delete_cluster(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
# Delete Cluster
client.delete(cluster)
# Delete nodes(in cluster) from AWS for Imported and Custom Cluster
if (len(nodes) > 0):
cluster_type = get_cluster_type(client, cluster)
print(cluster_type)
if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
nodes = client.list_node(clusterId=cluster.id).data
filters = [
{'Name': 'tag:Name',
'Values': ['testcustom*', 'teststess*']}]
ip_filter = {}
ip_list = []
ip_filter['Name'] = \
'network-interface.addresses.association.public-ip'
ip_filter['Values'] = ip_list
filters.append(ip_filter)
for node in nodes:
ip_list.append(node.externalIpAddress)
assert len(ip_filter) > 0
print(ip_filter)
aws_nodes = AmazonWebServices().get_nodes(filters)
for node in aws_nodes:
print(node.public_ip_address)
AmazonWebServices().delete_nodes(aws_nodes)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
workload2, allow_connectivity=True):
wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
for pod in wl1_pods:
for o_pod in wl2_pods:
check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
pods = p_client.list_pod(workloadId=workload.id).data
for pod in pods:
for o_pod in pods:
check_connectivity_between_pods(pod, o_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
pod_ip = pod2.status.podIp
cmd = "ping -c 1 -W 1 " + pod_ip
response = kubectl_pod_exec(pod1, cmd)
print("Actual ping Response from " + pod1.name + ":" + str(response))
if allow_connectivity:
assert pod_ip in str(response) and " 0% packet loss" in str(response)
else:
assert pod_ip in str(response) and " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
return execute_kubectl_cmd(command, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, username="root", password=password, port=port)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
while ns.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
timeout=DEFAULT_TIMEOUT):
start = time.time()
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for x in range(0, numofpods - 1):
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
while podimage != expectedimage:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for correct pod images")
time.sleep(.5)
pods = execute_kubectl_cmd(get_pods)
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = p_client.list_pod(workloadId=workload.id).data
while len(pods) != pod_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = p_client.list_pod(workloadId=workload.id).data
return pods
def get_admin_client_and_cluster():
client = get_admin_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[]):
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == intermediate_state
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == "active"
wait_for_nodes_to_become_active(client, cluster,
exception_list=nodes_not_in_active_state)
return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
start = time.time()
sleep = 0.01
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] for condition after {}' \
' seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
def cluster_cleanup(client, cluster, aws_nodes=None):
if RANCHER_CLEANUP_CLUSTER:
client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
else:
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def create_config_file(env_details):
file = open(env_file, "w")
file.write(env_details)
file.close()
def validate_hostPort(p_client, workload, source_port, cluster):
pods = p_client.list_pod(workloadId=workload.id).data
nodes = get_schedulable_nodes(cluster)
for node in nodes:
target_name_list = []
for pod in pods:
print(pod.nodeId + " check " + node.id)
if pod.nodeId == node.id:
target_name_list.append(pod.name)
break
host_ip = node.externalIpAddress
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload):
url = get_endpoint_url_for_workload(p_client, workload)
target_name_list = get_target_names(p_client, [workload])
wait_until_lb_is_active(url)
validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster):
source_port = workload.publicEndpoints[0]["port"]
nodes = get_schedulable_nodes(cluster)
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
for node in nodes:
host_ip = node.externalIpAddress
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods):
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod["name"])
curl_cmd = "http://" + cluster_ip + "/name.html"
for pod in test_pods:
validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
while pv.state != "available":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to available")
time.sleep(.5)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
while pvc.state != "bound":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to bound")
time.sleep(.5)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
mount_path, sub_path, is_daemonSet=False):
volumes = [{"type": "volume",
"name": "vol1",
"persistentVolumeClaim": {
"readOnly": "false",
"type": "persistentVolumeClaimVolumeSource",
"persistentVolumeClaimId": pvc_name
}}]
volumeMounts = [{"readOnly": "False",
"type": "volumeMount",
"mountPath": mount_path,
"subPath": sub_path,
"name": "vol1"
}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts
}]
if is_daemonSet:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes,
daemonSetConfig={})
else:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes)
return workload
def write_content_to_file(pod, content, filename):
cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
output = kubectl_pod_exec(pod, cmd_write)
assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
output = kubectl_pod_exec(pod, cmd_get_content)
assert output.strip().decode('utf-8') == content
def wait_for_mcapp_to_active(client, multiClusterApp, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
print("\nuuid:")
print(multiClusterApp.uuid)
time.sleep(5)
mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
start = time.time()
assert len(mcapps) == 1
mapp = mcapps[0]
print(mapp.state)
while mapp.state != "active":
print(mapp.uuid)
print(mapp.state)
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
multiclusterapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
assert len(multiclusterapps) == 1
mapp = multiclusterapps[0]
return mapp
def validate_mcapp_cluster(app_id, p_client):
mcapp = p_client.list_app(name=app_id).data
assert len(mcapp) == 1
app = mcapp[0]
return app
def wait_for_mcapp_cluster_level_to_active(client, app_id, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
mcapps = client.list_app(name=app_id).data
start = time.time()
assert len(mcapps) == 1
mapp = mcapps[0]
while mapp.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
apps = client.list_app(name=app_id).data
assert len(apps) == 1
mapp = apps[0]
return mapp
def get_admin_client_and_cluster_mcapp():
clusters = []
client = get_admin_client()
if CLUSTER_NAME == "" or CLUSTER_NAME_2 == "":
clusters = client.list_cluster().data
else:
clusters.append(client.list_cluster(name=CLUSTER_NAME).data)
clusters.append(client.list_cluster(name=CLUSTER_NAME_2).data)
assert len(clusters) == 2
return client, clusters
def validate_multi_cluster_app_cluster(app_id1, app_id2, p_client1, p_client2):
validate_mcapp_cluster(app_id1, p_client1)
if app_id2 != "":
validate_mcapp_cluster(app_id2, p_client2)
# verify app in cluster is active or not
wait_for_mcapp_cluster_level_to_active(p_client1, app_id1)
if app_id2 != "":
wait_for_mcapp_cluster_level_to_active(p_client2, app_id2)
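# Hedged end-to-end sketch (object names are placeholders; the real tests import these helpers):
#   client, cluster = get_admin_client_and_cluster()
#   create_kubeconfig(cluster)
#   project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
#   p_client = get_project_client_for_token(project, ADMIN_TOKEN)
#   workload = p_client.create_workload(name=random_test_name(), namespaceId=ns.id,
#                                       containers=[{"name": "test1", "image": TEST_IMAGE}])
#   validate_workload(p_client, workload, "deployment", ns.name)
#   cluster_cleanup(client, cluster)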
| 34.946413
| 113
| 0.635471
| 4,938
| 40,433
| 4.968206
| 0.094978
| 0.015693
| 0.014837
| 0.011617
| 0.491012
| 0.428158
| 0.366119
| 0.334447
| 0.291077
| 0.265601
| 0
| 0.009046
| 0.261841
| 40,433
| 1,157
| 114
| 34.946413
| 0.81294
| 0.011402
| 0
| 0.401879
| 0
| 0
| 0.08711
| 0.011812
| 0
| 0
| 0
| 0
| 0.08977
| 1
| 0.090814
| false
| 0.005219
| 0.014614
| 0.008351
| 0.169102
| 0.02714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a148b5d990f7bb1b408caafa5a8cdf6862a40c6
| 1,195
|
py
|
Python
|
LeetCode/Python3/String/20. Valid Parentheses.py
|
WatsonWangZh/CodingPractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | 11
|
2019-09-01T22:36:00.000Z
|
2021-11-08T08:57:20.000Z
|
LeetCode/Python3/String/20. Valid Parentheses.py
|
WatsonWangZh/LeetCodePractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | null | null | null |
LeetCode/Python3/String/20. Valid Parentheses.py
|
WatsonWangZh/LeetCodePractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | 2
|
2020-05-27T14:58:52.000Z
|
2020-05-27T15:04:17.000Z
|
# Given a string containing just the characters '(', ')', '{', '}', '[' and ']',
# determine if the input string is valid.
# An input string is valid if:
# Open brackets must be closed by the same type of brackets.
# Open brackets must be closed in the correct order.
# Note that an empty string is also considered valid.
# Example 1:
# Input: "()"
# Output: true
# Example 2:
# Input: "()[]{}"
# Output: true
# Example 3:
# Input: "(]"
# Output: false
# Example 4:
# Input: "([)]"
# Output: false
# Example 5:
# Input: "{[]}"
# Output: true
class Solution(object):
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
pairs = {')': '(', ']': '[', '}': '{'}
stack = []
for ch in s:
if ch in pairs.values():
stack.append(ch)
elif ch in pairs.keys():
if len(stack) == 0 or (stack.pop() != pairs[ch]):
return False
return len(stack) == 0
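# Worked trace (illustrative): for s = "([)]" the stack evolves as
# '(' -> ['('], '[' -> ['(', '['], then ')' pops '[' which does not match the expected '(',
# so isValid returns False, in line with Example 4 above.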
def main():
s = Solution()
print(s.isValid("()"))
print(s.isValid("()[]{}"))
print(s.isValid("(]"))
print(s.isValid("([)]"))
print(s.isValid("{[]}"))
if __name__ == "__main__":
main()
| 21.727273
| 81
| 0.512134
| 144
| 1,195
| 4.194444
| 0.458333
| 0.09106
| 0.107616
| 0.119205
| 0.187086
| 0.107616
| 0.107616
| 0.107616
| 0.107616
| 0.107616
| 0
| 0.008274
| 0.29205
| 1,195
| 54
| 82
| 22.12963
| 0.705674
| 0.441004
| 0
| 0
| 0
| 0
| 0.051948
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.25
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a14fdb015437094dc2620963de3edb83ccea376
| 1,706
|
py
|
Python
|
backend/ibutsu_server/controllers/health_controller.py
|
rsnyman/ibutsu-server
|
3d190a3ab2f3cd206b7c5509ba21f95ce5bbdfcc
|
[
"MIT"
] | 10
|
2020-07-07T07:00:00.000Z
|
2022-03-30T12:21:44.000Z
|
backend/ibutsu_server/controllers/health_controller.py
|
rsnyman/ibutsu-server
|
3d190a3ab2f3cd206b7c5509ba21f95ce5bbdfcc
|
[
"MIT"
] | 133
|
2020-07-06T20:10:45.000Z
|
2022-03-31T15:19:19.000Z
|
backend/ibutsu_server/controllers/health_controller.py
|
rsnyman/ibutsu-server
|
3d190a3ab2f3cd206b7c5509ba21f95ce5bbdfcc
|
[
"MIT"
] | 9
|
2020-07-06T17:33:29.000Z
|
2022-03-07T00:08:00.000Z
|
from flask import current_app
from sqlalchemy.exc import InterfaceError
from sqlalchemy.exc import OperationalError
try:
from ibutsu_server.db.model import Result
IS_CONNECTED = True
except ImportError:
IS_CONNECTED = False
def get_health(token_info=None, user=None):
"""Get a health report
:rtype: Health
"""
return {"status": "OK", "message": "Service is running"}
def get_database_health(token_info=None, user=None):
"""Get a health report for the database
:rtype: Health
"""
response = ({"status": "Pending", "message": "Fetching service status"}, 200)
# Try to connect to the database, and handle various responses
try:
if not IS_CONNECTED:
response = ({"status": "Error", "message": "Incomplete database configuration"}, 500)
else:
Result.query.first()
response = ({"status": "OK", "message": "Service is running"}, 200)
except OperationalError:
response = ({"status": "Error", "message": "Unable to connect to the database"}, 500)
except InterfaceError:
response = ({"status": "Error", "message": "Incorrect connection configuration"}, 500)
except Exception as e:
response = ({"status": "Error", "message": str(e)}, 500)
return response
def get_health_info(token_info=None, user=None):
"""Get the information about this server
:rtype: HealthInfo
"""
return {
"frontend": current_app.config.get("FRONTEND_URL", "http://localhost:3000"),
"backend": current_app.config.get("BACKEND_URL", "http://localhost:8080"),
"api_ui": current_app.config.get("BACKEND_URL", "http://localhost:8080") + "/api/ui/",
}
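# Illustrative responses (sketch, not an extra route):
#   get_health()          -> {"status": "OK", "message": "Service is running"}
#   get_database_health() -> a (body, status_code) tuple, e.g.
#       ({"status": "Error", "message": "Unable to connect to the database"}, 500)
#       when the database raises OperationalError.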
| 32.188679
| 97
| 0.649472
| 200
| 1,706
| 5.44
| 0.4
| 0.077206
| 0.069853
| 0.095588
| 0.292279
| 0.251838
| 0.172794
| 0.172794
| 0.172794
| 0.172794
| 0
| 0.022321
| 0.212192
| 1,706
| 52
| 98
| 32.807692
| 0.787202
| 0.121923
| 0
| 0.064516
| 0
| 0
| 0.279725
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.16129
| 0
| 0.354839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a14ffa87c6cf8cc2785c57c735fc9bf74a8348d
| 9,200
|
py
|
Python
|
src/python/tsnecuda/TSNE.py
|
rappdw/tsne-cuda
|
1249948704b0ae1847ebe614801f8a326050b0f4
|
[
"BSD-3-Clause"
] | 1
|
2019-11-06T21:56:26.000Z
|
2019-11-06T21:56:26.000Z
|
src/python/tsnecuda/TSNE.py
|
amitadate/tsne-cuda
|
efa209834879bba88814e74d7062539f4de07cc2
|
[
"BSD-3-Clause"
] | null | null | null |
src/python/tsnecuda/TSNE.py
|
amitadate/tsne-cuda
|
efa209834879bba88814e74d7062539f4de07cc2
|
[
"BSD-3-Clause"
] | null | null | null |
"""Bindings for the Barnes Hut TSNE algorithm with fast nearest neighbors
Refs:
References
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
import numpy as N
import ctypes
import os
import pkg_resources
def ord_string(s):
b = bytearray()
arr = b.extend(map(ord, s))
return N.array([x for x in b] + [0]).astype(N.uint8)
class TSNE(object):
def __init__(self,
n_components=2,
perplexity=50.0,
early_exaggeration=2.0,
learning_rate=200.0,
num_neighbors=1023,
force_magnify_iters=250,
pre_momentum=0.5,
post_momentum=0.8,
theta=0.5,
epssq=0.0025,
n_iter=1000,
n_iter_without_progress=1000,
min_grad_norm=1e-7,
perplexity_epsilon=1e-3,
metric='euclidean',
init='random',
return_style='once',
num_snapshots=5,
verbose=0,
random_seed=None,
use_interactive=False,
viz_timeout=10000,
viz_server="tcp://localhost:5556",
dump_points=False,
dump_file="dump.txt",
dump_interval=1,
print_interval=10,
device=0,
):
"""Initialization method for barnes hut T-SNE class.
"""
# Initialize the variables
self.n_components = int(n_components)
if self.n_components != 2:
raise ValueError('The current barnes-hut implementation does not support projection into dimensions other than 2 for now.')
self.perplexity = float(perplexity)
self.early_exaggeration = float(early_exaggeration)
self.learning_rate = float(learning_rate)
self.n_iter = int(n_iter)
self.n_iter_without_progress = int(n_iter_without_progress)
self.min_grad_norm = float(min_grad_norm)
if metric not in ['euclidean']:
raise ValueError('Non-Euclidean metrics are not currently supported. Please use metric=\'euclidean\' for now.')
else:
self.metric = metric
if init not in ['random']:
raise ValueError('Non-Random initialization is not currently supported. Please use init=\'random\' for now.')
else:
self.init = init
self.verbose = int(verbose)
# Initialize non-sklearn variables
self.num_neighbors = int(num_neighbors)
self.force_magnify_iters = int(force_magnify_iters)
self.perplexity_epsilon = float(perplexity_epsilon)
self.pre_momentum = float(pre_momentum)
self.post_momentum = float(post_momentum)
self.theta = float(theta)
        self.epssq = float(epssq)
self.device = int(device)
self.print_interval = int(print_interval)
        # Point dumping
self.dump_file = str(dump_file)
self.dump_points = bool(dump_points)
self.dump_interval = int(dump_interval)
# Viz
self.use_interactive = bool(use_interactive)
self.viz_server = str(viz_server)
self.viz_timeout = int(viz_timeout)
# Return style
if return_style not in ['once','snapshots']:
raise ValueError('Invalid return style...')
elif return_style == 'once':
self.return_style = 0
elif return_style == 'snapshots':
self.return_style = 1
self.num_snapshots = int(num_snapshots)
# Build the hooks for the BH T-SNE library
self._path = pkg_resources.resource_filename('tsnecuda','') # Load from current location
# self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path) # Load the ctypes library
# self._gpufaiss_lib = N.ctypeslib.load_library('libgpufaiss', self._path) # Load the ctypes library
self._lib = N.ctypeslib.load_library('libtsnecuda', self._path) # Load the ctypes library
# Hook the BH T-SNE function
self._lib.pymodule_bh_tsne.restype = None
self._lib.pymodule_bh_tsne.argtypes = [
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS, WRITEABLE'), # result
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS'), # points
ctypes.POINTER(N.ctypeslib.c_intp), # dims
ctypes.c_float, # Perplexity
ctypes.c_float, # Learning Rate
ctypes.c_float, # Magnitude Factor
ctypes.c_int, # Num Neighbors
ctypes.c_int, # Iterations
ctypes.c_int, # Iterations no progress
ctypes.c_int, # Force Magnify iterations
ctypes.c_float, # Perplexity search epsilon
ctypes.c_float, # pre-exaggeration momentum
ctypes.c_float, # post-exaggeration momentum
ctypes.c_float, # Theta
ctypes.c_float, # epssq
ctypes.c_float, # Minimum gradient norm
ctypes.c_int, # Initialization types
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS'), # Initialization Data
ctypes.c_bool, # Dump points
N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Dump File
ctypes.c_int, # Dump interval
ctypes.c_bool, # Use interactive
N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Viz Server
ctypes.c_int, # Viz timeout
ctypes.c_int, # Verbosity
ctypes.c_int, # Print interval
ctypes.c_int, # GPU Device
ctypes.c_int, # Return style
ctypes.c_int ] # Number of snapshots
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed output.
Arguments:
X {array} -- Input array, shape: (n_points, n_dimensions)
Keyword Arguments:
y {None} -- Ignored (default: {None})
"""
# Setup points/embedding requirements
self.points = N.require(X, N.float32, ['CONTIGUOUS', 'ALIGNED'])
self.embedding = N.zeros(shape=(X.shape[0],self.n_components))
self.embedding = N.require(self.embedding , N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE'])
# Handle Initialization
if y is None:
self.initialization_type = 1
self.init_data = N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED'])
else:
self.initialization_type = 3
self.init_data = N.require(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED'])
# Handle dumping and viz strings
self.dump_file_ = N.require(ord_string(self.dump_file), N.uint8, ['CONTIGUOUS', 'ALIGNED'])
self.viz_server_ = N.require(ord_string(self.viz_server), N.uint8, ['CONTIGUOUS', 'ALIGNED'])
self._lib.pymodule_bh_tsne(
self.embedding, # result
self.points, # points
self.points.ctypes.shape, # dims
ctypes.c_float(self.perplexity), # Perplexity
ctypes.c_float(self.learning_rate), # Learning Rate
ctypes.c_float(self.early_exaggeration), # Magnitude Factor
ctypes.c_int(self.num_neighbors), # Num Neighbors
ctypes.c_int(self.n_iter), # Iterations
ctypes.c_int(self.n_iter_without_progress), # Iterations no progress
ctypes.c_int(self.force_magnify_iters), # Force Magnify iterations
ctypes.c_float(self.perplexity_epsilon), # Perplexity search epsilon
ctypes.c_float(self.pre_momentum), # pre-exaggeration momentum
ctypes.c_float(self.post_momentum), # post-exaggeration momentum
ctypes.c_float(self.theta), # Theta
ctypes.c_float(self.epssq), # epssq
ctypes.c_float(self.min_grad_norm), # Minimum gradient norm
ctypes.c_int(self.initialization_type), # Initialization types
self.init_data, # Initialization Data
ctypes.c_bool(self.dump_points), # Dump points
self.dump_file_, # Dump File
ctypes.c_int(self.dump_interval), # Dump interval
ctypes.c_bool(self.use_interactive), # Use interactive
self.viz_server_, # Viz Server
ctypes.c_int(self.viz_timeout), # Viz timeout
ctypes.c_int(self.verbose), # Verbosity
ctypes.c_int(self.print_interval), # Print interval
ctypes.c_int(self.device), # GPU Device
ctypes.c_int(self.return_style), # Return style
ctypes.c_int(self.num_snapshots) ) # Number of snapshots
return self.embedding
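# --- Hedged usage sketch (not part of the original source) ---
# How the TSNE class above might be driven, assuming the compiled libtsnecuda
# library is installed alongside the package and a CUDA-capable GPU is available.
# The random data and parameter values are illustrative only.
import numpy as np
from tsnecuda import TSNE

points = np.random.RandomState(0).rand(5000, 50).astype(np.float32)
embedding = TSNE(n_components=2, perplexity=30.0, learning_rate=200.0,
                 n_iter=1000, verbose=1).fit_transform(points)
print(embedding.shape)  # expected: (5000, 2)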
| 42.790698
| 135
| 0.595978
| 1,077
| 9,200
| 4.905292
| 0.225627
| 0.06095
| 0.045429
| 0.0318
| 0.343744
| 0.154079
| 0.059057
| 0.046943
| 0.02915
| 0.020822
| 0
| 0.016682
| 0.309348
| 9,200
| 214
| 136
| 42.990654
| 0.814762
| 0.213152
| 0
| 0.174194
| 0
| 0
| 0.087073
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019355
| false
| 0
| 0.025806
| 0
| 0.064516
| 0.019355
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a1872d6c1f83595585a8fcb3b624041de25bbab
| 22,787
|
py
|
Python
|
python/helpers/pydev/pydevd_file_utils.py
|
kirmerzlikin/intellij-community
|
b5f5b5f38904b32c459203633e4ea17dc2736827
|
[
"Apache-2.0"
] | 1
|
2019-08-02T21:11:19.000Z
|
2019-08-02T21:11:19.000Z
|
python/helpers/pydev/pydevd_file_utils.py
|
kirmerzlikin/intellij-community
|
b5f5b5f38904b32c459203633e4ea17dc2736827
|
[
"Apache-2.0"
] | null | null | null |
python/helpers/pydev/pydevd_file_utils.py
|
kirmerzlikin/intellij-community
|
b5f5b5f38904b32c459203633e4ea17dc2736827
|
[
"Apache-2.0"
] | null | null | null |
r'''
This module provides utilities to get the absolute filenames so that we can be sure that:
- The case of a file will match the actual file in the filesystem (otherwise breakpoints won't be hit).
- Providing means for the user to make path conversions when doing a remote debugging session in
one machine and debugging in another.
To do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with the appropriate paths.
@note:
in this context, the server is where your python process is running
and the client is where eclipse is running.
E.g.:
If the server (your python process) has the structure
/user/projects/my_project/src/package/module1.py
and the client has:
c:\my_project\src\package\module1.py
the PATHS_FROM_ECLIPSE_TO_PYTHON would have to be:
PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\my_project\src', r'/user/projects/my_project/src')]
alternatively, this can be set with an environment variable from the command line:
set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\my_project\src','/user/projects/my_project/src']]
@note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug the result of those translations
@note: the case of the paths is important! Note that this can be tricky to get right when one machine
uses a case-independent filesystem and the other uses a case-dependent filesystem (if the system being
debugged is case-independent, 'normcase()' should be used on the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON).
@note: all the paths with breakpoints must be translated (otherwise they won't be found in the server)
@note: to enable remote debugging in the target machine (pydev extensions in the eclipse installation)
import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend)
see parameter docs on pydevd.py
@note: for doing a remote debugging session, all the pydevd_ files must be on the server accessible
through the PYTHONPATH (and the PATHS_FROM_ECLIPSE_TO_PYTHON only needs to be set on the target
machine for the paths that'll actually have breakpoints).
'''
from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K, DebugInfoHolder, IS_WINDOWS, IS_JYTHON
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
import json
import os.path
import sys
import traceback
_os_normcase = os.path.normcase
basename = os.path.basename
exists = os.path.exists
join = os.path.join
try:
rPath = os.path.realpath # @UndefinedVariable
except:
# jython does not support os.path.realpath
# realpath is a no-op on systems without islink support
rPath = os.path.abspath
# defined as a list of tuples where the 1st element of the tuple is the path in the client machine
# and the 2nd element is the path in the server machine.
# see module docstring for more details.
try:
PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]'))
except Exception:
sys.stderr.write('Error loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment variable.\n')
traceback.print_exc()
PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list):
sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment variable to be a list.\n')
PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
# Converting json lists to tuple
PATHS_FROM_ECLIPSE_TO_PYTHON = [tuple(x) for x in PATHS_FROM_ECLIPSE_TO_PYTHON]
# example:
# PATHS_FROM_ECLIPSE_TO_PYTHON = [
# (r'd:\temp\temp_workspace_2\test_python\src\yyy\yyy',
# r'd:\temp\temp_workspace_2\test_python\src\hhh\xxx')
# ]
convert_to_long_pathname = lambda filename:filename
convert_to_short_pathname = lambda filename:filename
get_path_with_real_case = lambda filename:filename
if sys.platform == 'win32':
try:
import ctypes
from ctypes.wintypes import MAX_PATH, LPCWSTR, LPWSTR, DWORD
GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
GetLongPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
GetLongPathName.restype = DWORD
GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
GetShortPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
GetShortPathName.restype = DWORD
def _convert_to_long_pathname(filename):
buf = ctypes.create_unicode_buffer(MAX_PATH)
if IS_PY2 and isinstance(filename, str):
filename = filename.decode(getfilesystemencoding())
rv = GetLongPathName(filename, buf, MAX_PATH)
if rv != 0 and rv <= MAX_PATH:
filename = buf.value
if IS_PY2:
filename = filename.encode(getfilesystemencoding())
return filename
def _convert_to_short_pathname(filename):
buf = ctypes.create_unicode_buffer(MAX_PATH)
if IS_PY2 and isinstance(filename, str):
filename = filename.decode(getfilesystemencoding())
rv = GetShortPathName(filename, buf, MAX_PATH)
if rv != 0 and rv <= MAX_PATH:
filename = buf.value
if IS_PY2:
filename = filename.encode(getfilesystemencoding())
return filename
def _get_path_with_real_case(filename):
ret = convert_to_long_pathname(convert_to_short_pathname(filename))
# This doesn't handle the drive letter properly (it'll be unchanged).
# Make sure the drive letter is always uppercase.
if len(ret) > 1 and ret[1] == ':' and ret[0].islower():
return ret[0].upper() + ret[1:]
return ret
# Check that it actually works
_get_path_with_real_case(__file__)
except:
# Something didn't quite work out, leave no-op conversions in place.
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
traceback.print_exc()
else:
convert_to_long_pathname = _convert_to_long_pathname
convert_to_short_pathname = _convert_to_short_pathname
get_path_with_real_case = _get_path_with_real_case
elif IS_JYTHON and IS_WINDOWS:
def get_path_with_real_case(filename):
from java.io import File
f = File(filename)
ret = f.getCanonicalPath()
if IS_PY2 and not isinstance(ret, str):
return ret.encode(getfilesystemencoding())
return ret
if IS_WINDOWS:
if IS_JYTHON:
def normcase(filename):
return filename.lower()
else:
def normcase(filename):
            # `normcase` doesn't lower-case on Python 2 for non-English locales, but the
            # Java side does, so we should do it manually.
if '~' in filename:
filename = convert_to_long_pathname(filename)
filename = _os_normcase(filename)
return filename.lower()
else:
def normcase(filename):
return filename # no-op
_ide_os = 'WINDOWS' if IS_WINDOWS else 'UNIX'
def set_ide_os(os):
'''
We need to set the IDE os because the host where the code is running may be
actually different from the client (and the point is that we want the proper
paths to translate from the client to the server).
:param os:
'UNIX' or 'WINDOWS'
'''
global _ide_os
prev = _ide_os
if os == 'WIN': # Apparently PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116)
os = 'WINDOWS'
assert os in ('WINDOWS', 'UNIX')
if prev != os:
_ide_os = os
# We need to (re)setup how the client <-> server translation works to provide proper separators.
setup_client_server_paths(_last_client_server_paths_set)
DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in ('1', 'true')
# Caches filled as requested during the debug session.
NORM_PATHS_CONTAINER = {}
NORM_PATHS_AND_BASE_CONTAINER = {}
def _NormFile(filename):
abs_path, real_path = _NormPaths(filename)
return real_path
def _AbsFile(filename):
abs_path, real_path = _NormPaths(filename)
return abs_path
# Returns tuple of absolute path and real path for given filename
def _NormPaths(filename):
try:
return NORM_PATHS_CONTAINER[filename]
except KeyError:
if filename.__class__ != str:
raise AssertionError('Paths passed to _NormPaths must be str. Found: %s (%s)' % (filename, type(filename)))
abs_path = _NormPath(filename, os.path.abspath)
real_path = _NormPath(filename, rPath)
# cache it for fast access later
NORM_PATHS_CONTAINER[filename] = abs_path, real_path
return abs_path, real_path
def _NormPath(filename, normpath):
r = normpath(filename)
ind = r.find('.zip')
if ind == -1:
ind = r.find('.egg')
if ind != -1:
ind += 4
zip_path = r[:ind]
inner_path = r[ind:]
if inner_path.startswith('!'):
# Note (fabioz): although I can replicate this by creating a file ending as
# .zip! or .egg!, I don't really know what's the real-world case for this
# (still kept as it was added by @jetbrains, but it should probably be reviewed
# later on).
# Note 2: it goes hand-in-hand with 'exists'.
inner_path = inner_path[1:]
zip_path = zip_path + '!'
if inner_path.startswith('/') or inner_path.startswith('\\'):
inner_path = inner_path[1:]
if inner_path:
r = join(normcase(zip_path), inner_path)
return r
r = normcase(r)
return r
_ZIP_SEARCH_CACHE = {}
_NOT_FOUND_SENTINEL = object()
def exists(file):
if os.path.exists(file):
return file
ind = file.find('.zip')
if ind == -1:
ind = file.find('.egg')
if ind != -1:
ind += 4
zip_path = file[:ind]
inner_path = file[ind:]
if inner_path.startswith("!"):
# Note (fabioz): although I can replicate this by creating a file ending as
# .zip! or .egg!, I don't really know what's the real-world case for this
# (still kept as it was added by @jetbrains, but it should probably be reviewed
# later on).
# Note 2: it goes hand-in-hand with '_NormPath'.
inner_path = inner_path[1:]
zip_path = zip_path + '!'
zip_file_obj = _ZIP_SEARCH_CACHE.get(zip_path, _NOT_FOUND_SENTINEL)
if zip_file_obj is None:
return False
elif zip_file_obj is _NOT_FOUND_SENTINEL:
try:
import zipfile
zip_file_obj = zipfile.ZipFile(zip_path, 'r')
_ZIP_SEARCH_CACHE[zip_path] = zip_file_obj
except:
_ZIP_SEARCH_CACHE[zip_path] = _NOT_FOUND_SENTINEL
return False
try:
if inner_path.startswith('/') or inner_path.startswith('\\'):
inner_path = inner_path[1:]
_info = zip_file_obj.getinfo(inner_path.replace('\\', '/'))
return join(zip_path, inner_path)
except KeyError:
return None
return None
# Now, let's do a quick test to see if we're working with a version of python that has no problems
# related to the names generated...
try:
try:
code = rPath.func_code
except AttributeError:
code = rPath.__code__
if not exists(_NormFile(code.co_filename)):
sys.stderr.write('-------------------------------------------------------------------------------\n')
sys.stderr.write('pydev debugger: CRITICAL WARNING: This version of python seems to be incorrectly compiled (internal generated filenames are not absolute)\n')
sys.stderr.write('pydev debugger: The debugger may still function, but it will work slower and may miss breakpoints.\n')
sys.stderr.write('pydev debugger: Related bug: http://bugs.python.org/issue1666807\n')
sys.stderr.write('-------------------------------------------------------------------------------\n')
sys.stderr.flush()
NORM_SEARCH_CACHE = {}
initial_norm_paths = _NormPaths
def _NormPaths(filename): # Let's redefine _NormPaths to work with paths that may be incorrect
try:
return NORM_SEARCH_CACHE[filename]
except KeyError:
abs_path, real_path = initial_norm_paths(filename)
if not exists(real_path):
                # We must actually go on and check whether it can be found as a
                # relative path under one of the entries in the PYTHONPATH.
for path in sys.path:
abs_path, real_path = initial_norm_paths(join(path, filename))
if exists(real_path):
break
else:
sys.stderr.write('pydev debugger: Unable to find real location for: %s\n' % (filename,))
abs_path = filename
real_path = filename
NORM_SEARCH_CACHE[filename] = abs_path, real_path
return abs_path, real_path
except:
# Don't fail if there's something not correct here -- but at least print it to the user so that we can correct that
traceback.print_exc()
# Note: as these functions may be rebound, users should always import
# pydevd_file_utils and then use:
#
# pydevd_file_utils.norm_file_to_client
# pydevd_file_utils.norm_file_to_server
#
# instead of importing any of those names to a given scope.
def _original_file_to_client(filename, cache={}):
try:
return cache[filename]
except KeyError:
cache[filename] = get_path_with_real_case(_AbsFile(filename))
return cache[filename]
_original_file_to_server = _NormFile
norm_file_to_client = _original_file_to_client
norm_file_to_server = _original_file_to_server
def _fix_path(path, sep):
if path.endswith('/') or path.endswith('\\'):
path = path[:-1]
if sep != '/':
path = path.replace('/', sep)
return path
_last_client_server_paths_set = []
def setup_client_server_paths(paths):
'''paths is the same format as PATHS_FROM_ECLIPSE_TO_PYTHON'''
global norm_file_to_client
global norm_file_to_server
global _last_client_server_paths_set
_last_client_server_paths_set = paths[:]
# Work on the client and server slashes.
python_sep = '\\' if IS_WINDOWS else '/'
eclipse_sep = '\\' if _ide_os == 'WINDOWS' else '/'
norm_filename_to_server_container = {}
norm_filename_to_client_container = {}
initial_paths = list(paths)
paths_from_eclipse_to_python = initial_paths[:]
# Apply normcase to the existing paths to follow the os preferences.
for i, (path0, path1) in enumerate(paths_from_eclipse_to_python[:]):
if IS_PY2:
if isinstance(path0, unicode):
path0 = path0.encode(sys.getfilesystemencoding())
if isinstance(path1, unicode):
path1 = path1.encode(sys.getfilesystemencoding())
path0 = _fix_path(path0, eclipse_sep)
path1 = _fix_path(path1, python_sep)
initial_paths[i] = (path0, path1)
paths_from_eclipse_to_python[i] = (normcase(path0), normcase(path1))
if not paths_from_eclipse_to_python:
# no translation step needed (just inline the calls)
norm_file_to_client = _original_file_to_client
norm_file_to_server = _original_file_to_server
return
# only setup translation functions if absolutely needed!
def _norm_file_to_server(filename, cache=norm_filename_to_server_container):
# Eclipse will send the passed filename to be translated to the python process
# So, this would be 'NormFileFromEclipseToPython'
try:
return cache[filename]
except KeyError:
if eclipse_sep != python_sep:
# Make sure that the separators are what we expect from the IDE.
filename = filename.replace(python_sep, eclipse_sep)
# used to translate a path from the client to the debug server
translated = normcase(filename)
for eclipse_prefix, server_prefix in paths_from_eclipse_to_python:
if translated.startswith(eclipse_prefix):
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: replacing to server: %s\n' % (translated,))
translated = translated.replace(eclipse_prefix, server_prefix)
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: sent to server: %s\n' % (translated,))
break
else:
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: to server: unable to find matching prefix for: %s in %s\n' % \
(translated, [x[0] for x in paths_from_eclipse_to_python]))
# Note that when going to the server, we do the replace first and only later do the norm file.
if eclipse_sep != python_sep:
translated = translated.replace(eclipse_sep, python_sep)
translated = _NormFile(translated)
cache[filename] = translated
return translated
def _norm_file_to_client(filename, cache=norm_filename_to_client_container):
# The result of this method will be passed to eclipse
# So, this would be 'NormFileFromPythonToEclipse'
try:
return cache[filename]
except KeyError:
# used to translate a path from the debug server to the client
translated = _NormFile(filename)
# After getting the real path, let's get it with the path with
# the real case and then obtain a new normalized copy, just in case
# the path is different now.
translated_proper_case = get_path_with_real_case(translated)
translated = _NormFile(translated_proper_case)
if IS_WINDOWS:
if translated.lower() != translated_proper_case.lower():
translated_proper_case = translated
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write(
'pydev debugger: _NormFile changed path (from: %s to %s)\n' % (
translated_proper_case, translated))
for i, (eclipse_prefix, python_prefix) in enumerate(paths_from_eclipse_to_python):
if translated.startswith(python_prefix):
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: replacing to client: %s\n' % (translated,))
# Note: use the non-normalized version.
eclipse_prefix = initial_paths[i][0]
translated = eclipse_prefix + translated_proper_case[len(python_prefix):]
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: sent to client: %s\n' % (translated,))
break
else:
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: to client: unable to find matching prefix for: %s in %s\n' % \
(translated, [x[1] for x in paths_from_eclipse_to_python]))
translated = translated_proper_case
if eclipse_sep != python_sep:
translated = translated.replace(python_sep, eclipse_sep)
# The resulting path is not in the python process, so, we cannot do a _NormFile here,
# only at the beginning of this method.
cache[filename] = translated
return translated
norm_file_to_server = _norm_file_to_server
norm_file_to_client = _norm_file_to_client
setup_client_server_paths(PATHS_FROM_ECLIPSE_TO_PYTHON)
def _is_int(filename):
# isdigit() doesn't support negative numbers
try:
int(filename)
return True
except:
return False
def is_real_file(filename):
# Check for Jupyter cells
return not _is_int(filename) and not filename.startswith("<ipython-input")
# For given file f returns tuple of its absolute path, real path and base name
def get_abs_path_real_path_and_base_from_file(f):
try:
return NORM_PATHS_AND_BASE_CONTAINER[f]
except:
if _NormPaths is None: # Interpreter shutdown
return f
if f is not None:
if f.endswith('.pyc'):
f = f[:-1]
elif f.endswith('$py.class'):
f = f[:-len('$py.class')] + '.py'
if not is_real_file(f):
abs_path, real_path, base = f, f, f
else:
abs_path, real_path = _NormPaths(f)
base = basename(real_path)
ret = abs_path, real_path, base
NORM_PATHS_AND_BASE_CONTAINER[f] = ret
return ret
def get_abs_path_real_path_and_base_from_frame(frame):
try:
return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
# This one is just internal (so, does not need any kind of client-server translation)
f = frame.f_code.co_filename
if f is not None and f.startswith (('build/bdist.', 'build\\bdist.')):
# files from eggs in Python 2.7 have paths like build/bdist.linux-x86_64/egg/<path-inside-egg>
f = frame.f_globals['__file__']
if get_abs_path_real_path_and_base_from_file is None: # Interpreter shutdown
return f
ret = get_abs_path_real_path_and_base_from_file(f)
# Also cache based on the frame.f_code.co_filename (if we had it inside build/bdist it can make a difference).
NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] = ret
return ret
def get_fullname(mod_name):
if IS_PY3K:
import pkgutil
else:
from _pydev_imps import _pydev_pkgutil_old as pkgutil
try:
loader = pkgutil.get_loader(mod_name)
except:
return None
if loader is not None:
for attr in ("get_filename", "_get_filename"):
meth = getattr(loader, attr, None)
if meth is not None:
return meth(mod_name)
return None
def get_package_dir(mod_name):
for path in sys.path:
mod_path = join(path, mod_name.replace('.', '/'))
if os.path.isdir(mod_path):
return mod_path
return None
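# --- Hedged usage sketch (not part of the original source) ---
# Exercises the client <-> server path translation defined above, assuming the pydev
# helper directory is on sys.path. The example paths mirror the ones in the module
# docstring and are otherwise made up.
import pydevd_file_utils

pydevd_file_utils.set_ide_os('WINDOWS')
pydevd_file_utils.setup_client_server_paths(
    [(r'c:\my_project\src', r'/user/projects/my_project/src')])
# A breakpoint path sent by the IDE is translated to the server-side filename...
print(pydevd_file_utils.norm_file_to_server(r'c:\my_project\src\package\module1.py'))
# ...and a server-side path is translated back before being reported to the IDE.
print(pydevd_file_utils.norm_file_to_client('/user/projects/my_project/src/package/module1.py'))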
| 38.169179
| 167
| 0.642647
| 2,981
| 22,787
| 4.678296
| 0.1684
| 0.016779
| 0.029829
| 0.033558
| 0.397462
| 0.296501
| 0.230819
| 0.209881
| 0.173742
| 0.147641
| 0
| 0.004848
| 0.275903
| 22,787
| 596
| 168
| 38.233221
| 0.840364
| 0.276122
| 0
| 0.366848
| 0
| 0.002717
| 0.082678
| 0.016966
| 0
| 0
| 0
| 0
| 0.005435
| 1
| 0.067935
| false
| 0.002717
| 0.032609
| 0.008152
| 0.222826
| 0.008152
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a19d381c903a0542a3789f5f4dbe06b87e43247
| 5,481
|
py
|
Python
|
src/networking/SessionsManager.py
|
OfekHarel/Orion-Connection-Software
|
2e767e31f94574bf464e24eaeed87f36b3247ca6
|
[
"MIT"
] | 1
|
2021-05-18T10:16:05.000Z
|
2021-05-18T10:16:05.000Z
|
src/networking/SessionsManager.py
|
OfekHarel/Orion-Connection-Software
|
2e767e31f94574bf464e24eaeed87f36b3247ca6
|
[
"MIT"
] | null | null | null |
src/networking/SessionsManager.py
|
OfekHarel/Orion-Connection-Software
|
2e767e31f94574bf464e24eaeed87f36b3247ca6
|
[
"MIT"
] | null | null | null |
import os
import socket
from random import randint
from src import Constants
from src.Constants import Network
from src.networking import NetworkPackets, Actions
from src.networking.Client import Client
from src.utils.DH_Encryption import Encryption
from src.utils.Enum import Enum
class SessionManager:
"""
    This class is responsible for handling any flow of network messages.
"""
def __init__(self):
address = (Network.SERVER_IP, Network.SERVER_PORT)
self.client = Client(str(socket.gethostname()), address)
self.val = self.client.connect()
if not self.val:
Network.IS_ONLINE = False
def go_crypto(self):
msg = NetworkPackets.split(self.client.receive())
g = int(msg[1])
n = int(msg[2])
g_pow_a_mod_n = int(msg[3])
crypto = Encryption(g, n)
crypto.get_full_key(g_pow_a_mod_n)
self.client.send(NetworkPackets.assemble(NetworkPackets.NetLogicIncomes.CONNECT.value,
str(crypto.get_partial_key())))
self.client.crypto = crypto
def gen_id(self) -> str:
num = str(randint(1, 9999))
num = num.zfill(4)
return num
def open_id_file(self):
try:
open(Constants.Files.ID, 'r+').close()
except FileNotFoundError:
open(Constants.Files.ID, 'x').close()
finally:
file = open(Constants.Files.ID, 'r+')
return file
def sync(self):
"""
This function contains the full process of the sync phase.
"""
if Network.IS_ONLINE:
self.go_crypto()
num = ""
file = self.open_id_file()
if os.path.getsize(Constants.Files.ID) == 0: # Empty
is_valid = False
while not is_valid:
num = self.gen_id()
self.client.send(NetworkPackets.assemble("COMPUTER", "ID_VAL", num))
msg = NetworkPackets.split(self.client.receive())
is_valid = msg[0] == NetworkPackets.NetLogicIncomes.VALID.value
file.write(num)
else:
is_valid = False
num = file.read()
while not is_valid:
self.client.send(NetworkPackets.assemble("COMPUTER", "ID_VAL", num))
msg = NetworkPackets.split(self.client.receive())
is_valid = msg[0] == NetworkPackets.NetLogicIncomes.VALID.value
if not is_valid:
num = self.gen_id()
if num != file.read():
file.close()
os.remove(Constants.Files.ID)
file = self.open_id_file()
file.write(num)
file.close()
def manage(self, incoming: str):
"""
        This function handles the execution of the required operations.
        :param incoming: Raw network message.
"""
if Network.IS_ONLINE:
incoming = NetworkPackets.split(incoming)[0]
if incoming in Operation.list():
if incoming == Operation.VOL_UP.value:
Actions.vol_up()
elif incoming == Operation.VOL_DOWN.value:
Actions.vol_down()
elif incoming == Operation.PAUSE_PLAY_TOGGLE.value:
Actions.play_pause()
elif incoming == Operation.SKIP.value:
Actions.next_song()
elif incoming == Operation.PREV.value:
Actions.prev_song()
elif incoming == Operation.MUTE.value:
Actions.mute()
elif incoming == Operation.OFF.value:
Actions.shut_down()
elif incoming == Operation.SLEEP.value:
Actions.sleep()
elif incoming == Operation.RESTART.value:
Actions.restart()
elif incoming == Operation.LOCK.value:
Actions.lock()
elif incoming == Operation.LOG_OUT.value:
Actions.log_out()
elif incoming == Operation.MAGIC_BTN.value:
Actions.run_file()
elif incoming == Operation.USAGE.value:
self.client.send(NetworkPackets.assemble(arr=Actions.COMPUTER.get_use_as_str_arr()))
elif incoming == Operation.DISCONNECT.value:
self.client.send(NetworkPackets.assemble(Operation.DISCONNECT.value))
return Operation.DISCONNECT
elif incoming in NetworkPackets.NetLogicIncomes.list():
if incoming == NetworkPackets.NetLogicIncomes.PAIRED.value:
Constants.Network.IS_PAIRING = True
self.client.send(NetworkPackets.assemble(arr=Actions.COMPUTER.get_specs_as_str_arr()))
elif incoming == NetworkPackets.NetLogicIncomes.INVALID:
pass
class Operation(Enum):
"""
All the operations that can be asked to execute.
"""
VOL_UP = "VOL_UP"
VOL_DOWN = "VOL_DOWN"
PAUSE_PLAY_TOGGLE = "PTT"
SKIP = "SKIP"
PREV = "PREV"
MUTE = "MUTE"
OFF = "OFF"
SLEEP = "SLEEP"
RESTART = "RESTRT"
LOCK = "LCK"
LOG_OUT = "LGOT"
DISCONNECT = "DISCON"
MAGIC_BTN = "MAGIC"
SPECS_INFO = "SPECS"
USAGE = "USE"
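# --- Hedged usage sketch (not part of the original source) ---
# How the sync/manage flow above might be driven by a caller, assuming the Orion
# server configured in Constants.Network is reachable. The receive loop is
# illustrative; the real application may structure it differently.
manager = SessionManager()
manager.sync()  # pairs this machine with the server (writes the ID file on first run)
while Network.IS_ONLINE:
    raw = manager.client.receive()
    if manager.manage(raw) == Operation.DISCONNECT:
        break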
| 35.590909
| 106
| 0.550447
| 576
| 5,481
| 5.109375
| 0.276042
| 0.061162
| 0.092762
| 0.057085
| 0.236833
| 0.178389
| 0.149507
| 0.134557
| 0.134557
| 0.095821
| 0
| 0.00367
| 0.353768
| 5,481
| 153
| 107
| 35.823529
| 0.827216
| 0.050538
| 0
| 0.173554
| 0
| 0
| 0.019926
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049587
| false
| 0.008264
| 0.07438
| 0
| 0.289256
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a1b13a3f3b068eb65d58c46e8bda2b6889a1fef
| 12,738
|
py
|
Python
|
tests/test_http_client.py
|
bhch/async-stripe
|
75d934a8bb242f664e7be30812c12335cf885287
|
[
"MIT",
"BSD-3-Clause"
] | 8
|
2021-05-29T08:57:58.000Z
|
2022-02-19T07:09:25.000Z
|
tests/test_http_client.py
|
bhch/async-stripe
|
75d934a8bb242f664e7be30812c12335cf885287
|
[
"MIT",
"BSD-3-Clause"
] | 5
|
2021-05-31T10:18:36.000Z
|
2022-01-25T11:39:03.000Z
|
tests/test_http_client.py
|
bhch/async-stripe
|
75d934a8bb242f664e7be30812c12335cf885287
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2021-05-29T13:27:10.000Z
|
2021-05-29T13:27:10.000Z
|
from __future__ import absolute_import, division, print_function
import pytest
import json
import asyncio
import stripe
import urllib3
from stripe import six, util
from async_stripe.http_client import TornadoAsyncHTTPClient
pytestmark = pytest.mark.asyncio
VALID_API_METHODS = ("get", "post", "delete")
class StripeClientTestCase(object):
REQUEST_LIBRARIES = ["AsyncHTTPClient"]
@pytest.fixture
def request_mocks(self, mocker):
request_mocks = {}
for lib in self.REQUEST_LIBRARIES:
request_mocks[lib] = mocker.patch("async_stripe.http_client.%s" % (lib,))
return request_mocks
class TestNewDefaultHttpClient(StripeClientTestCase):
@pytest.fixture(autouse=True)
def setup_warnings(self, request_mocks):
original_filters = stripe.http_client.warnings.filters[:]
stripe.http_client.warnings.simplefilter("ignore")
yield
stripe.http_client.warnings.filters = original_filters
def check_default(self, none_libs, expected):
for lib in none_libs:
setattr(stripe.http_client, lib, None)
inst = stripe.http_client.new_default_http_client()
assert isinstance(inst, expected)
def test_new_default_http_client_tornado(self):
self.check_default((), TornadoAsyncHTTPClient)
class TestRetrySleepTimeDefaultHttpClient(StripeClientTestCase):
from contextlib import contextmanager
def assert_sleep_times(self, client, expected):
until = len(expected)
actual = list(
map(lambda i: client._sleep_time_seconds(i + 1), range(until))
)
assert expected == actual
@contextmanager
def mock_max_delay(self, new_value):
original_value = stripe.http_client.HTTPClient.MAX_DELAY
stripe.http_client.HTTPClient.MAX_DELAY = new_value
try:
yield self
finally:
stripe.http_client.HTTPClient.MAX_DELAY = original_value
def test_sleep_time_exponential_back_off(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t
with self.mock_max_delay(10):
self.assert_sleep_times(client, [0.5, 1.0, 2.0, 4.0, 8.0])
def test_initial_delay_as_minimum(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t * 0.001
initial_delay = stripe.http_client.HTTPClient.INITIAL_DELAY
self.assert_sleep_times(client, [initial_delay] * 5)
def test_maximum_delay(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t
max_delay = stripe.http_client.HTTPClient.MAX_DELAY
expected = [0.5, 1.0, max_delay, max_delay, max_delay]
self.assert_sleep_times(client, expected)
def test_retry_after_header(self):
client = stripe.http_client.new_default_http_client()
client._add_jitter_time = lambda t: t
# Prefer retry-after if it's bigger
assert 30 == client._sleep_time_seconds(
2, (None, 409, {"retry-after": "30"})
)
# Prefer default if it's bigger
assert 2 == client._sleep_time_seconds(
3, (None, 409, {"retry-after": "1"})
)
# Ignore crazy-big values
assert 1 == client._sleep_time_seconds(
2, (None, 409, {"retry-after": "300"})
)
def test_randomness_added(self):
client = stripe.http_client.new_default_http_client()
random_value = 0.8
client._add_jitter_time = lambda t: t * random_value
base_value = stripe.http_client.HTTPClient.INITIAL_DELAY * random_value
with self.mock_max_delay(10):
expected = [
stripe.http_client.HTTPClient.INITIAL_DELAY,
base_value * 2,
base_value * 4,
base_value * 8,
base_value * 16,
]
self.assert_sleep_times(client, expected)
def test_jitter_has_randomness_but_within_range(self):
client = stripe.http_client.new_default_http_client()
jittered_ones = set(
map(lambda _: client._add_jitter_time(1), list(range(100)))
)
assert len(jittered_ones) > 1
assert all(0.5 <= val <= 1 for val in jittered_ones)
class TestRetryConditionsDefaultHttpClient(StripeClientTestCase):
def test_should_retry_on_codes(self):
one_xx = list(range(100, 104))
two_xx = list(range(200, 209))
three_xx = list(range(300, 308))
four_xx = list(range(400, 431))
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
codes = one_xx + two_xx + three_xx + four_xx
codes.remove(409)
# These status codes should not be retried by default.
for code in codes:
assert client._should_retry((None, code, None), None, 0) is False
# These status codes should be retried by default.
assert client._should_retry((None, 409, None), None, 0) is True
assert client._should_retry((None, 500, None), None, 0) is True
assert client._should_retry((None, 503, None), None, 0) is True
def test_should_retry_on_error(self, mocker):
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
api_connection_error = mocker.Mock()
api_connection_error.should_retry = True
assert client._should_retry(None, api_connection_error, 0) is True
api_connection_error.should_retry = False
assert client._should_retry(None, api_connection_error, 0) is False
def test_should_retry_on_stripe_should_retry_true(self, mocker):
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
headers = {"stripe-should-retry": "true"}
# Ordinarily, we would not retry a 400, but with the header as true, we would.
assert client._should_retry((None, 400, {}), None, 0) is False
assert client._should_retry((None, 400, headers), None, 0) is True
def test_should_retry_on_stripe_should_retry_false(self, mocker):
client = stripe.http_client.new_default_http_client()
client._max_network_retries = lambda: 1
headers = {"stripe-should-retry": "false"}
# Ordinarily, we would retry a 500, but with the header as false, we would not.
assert client._should_retry((None, 500, {}), None, 0) is True
assert client._should_retry((None, 500, headers), None, 0) is False
def test_should_retry_on_num_retries(self, mocker):
client = stripe.http_client.new_default_http_client()
max_test_retries = 10
client._max_network_retries = lambda: max_test_retries
api_connection_error = mocker.Mock()
api_connection_error.should_retry = True
assert (
client._should_retry(
None, api_connection_error, max_test_retries + 1
)
is False
)
assert (
client._should_retry((None, 409, None), None, max_test_retries + 1)
is False
)
class TestHTTPClient(object):
@pytest.fixture(autouse=True)
def setup_stripe(self):
orig_attrs = {"enable_telemetry": stripe.enable_telemetry}
stripe.enable_telemetry = False
yield
stripe.enable_telemetry = orig_attrs["enable_telemetry"]
async def test_sends_telemetry_on_second_request(self, mocker):
class TestClient(stripe.http_client.HTTPClient):
pass
stripe.enable_telemetry = True
url = "http://fake.url"
client = TestClient()
response_future = asyncio.Future()
response_future.set_result(["", 200, {"Request-Id": "req_123"}])
client.request = mocker.MagicMock(
return_value=response_future
)
_, code, _ = await client.request_with_retries("get", url, {}, None)
assert code == 200
client.request.assert_called_with("get", url, {}, None)
response_future = asyncio.Future()
response_future.set_result(["", 200, {"Request-Id": "req_234"}])
client.request = mocker.MagicMock(
return_value=response_future
)
_, code, _ = await client.request_with_retries("get", url, {}, None)
assert code == 200
args, _ = client.request.call_args
assert "X-Stripe-Client-Telemetry" in args[2]
telemetry = json.loads(args[2]["X-Stripe-Client-Telemetry"])
assert telemetry["last_request_metrics"]["request_id"] == "req_123"
class ClientTestBase(object):
@pytest.fixture
def request_mock(self, request_mocks):
return request_mocks[self.REQUEST_CLIENT.name]
@property
def valid_url(self, path="/foo"):
return "https://api.stripe.com%s" % (path,)
def make_request(self, method, url, headers, post_data):
client = self.REQUEST_CLIENT(verify_ssl_certs=True)
return client.request_with_retries(method, url, headers, post_data)
async def make_request_stream(self, method, url, headers, post_data):
client = self.REQUEST_CLIENT(verify_ssl_certs=True)
return await client.request_stream_with_retries(
method, url, headers, post_data
)
@pytest.fixture
def mock_response(self):
def mock_response(mock, body, code):
raise NotImplementedError(
"You must implement this in your test subclass"
)
return mock_response
@pytest.fixture
def mock_error(self):
def mock_error(mock, error):
raise NotImplementedError(
"You must implement this in your test subclass"
)
return mock_error
@pytest.fixture
def check_call(self):
def check_call(
mock, method, abs_url, headers, params, is_streaming=False
):
raise NotImplementedError(
"You must implement this in your test subclass"
)
return check_call
def test_request(self, request_mock, mock_response, check_call):
mock_response(request_mock, '{"foo": "baz"}', 200)
for method in VALID_API_METHODS:
abs_url = self.valid_url
data = ""
if method != "post":
abs_url = "%s?%s" % (abs_url, data)
data = None
headers = {"my-header": "header val"}
body, code, _ = self.make_request(method, abs_url, headers, data)
assert code == 200
assert body == '{"foo": "baz"}'
check_call(request_mock, method, abs_url, data, headers)
def test_request_stream(
self, mocker, request_mock, mock_response, check_call
):
for method in VALID_API_METHODS:
mock_response(request_mock, "some streamed content", 200)
abs_url = self.valid_url
data = ""
if method != "post":
abs_url = "%s?%s" % (abs_url, data)
data = None
headers = {"my-header": "header val"}
print(dir(self))
print("make_request_stream" in dir(self))
stream, code, _ = self.make_request_stream(
method, abs_url, headers, data
)
assert code == 200
            # Here we need to convert and align all content to one type (string),
            # as some clients return a string stream while others return a byte stream.
body_content = stream.read()
if hasattr(body_content, "decode"):
body_content = body_content.decode("utf-8")
assert body_content == "some streamed content"
mocker.resetall()
def test_exception(self, request_mock, mock_error):
mock_error(request_mock)
with pytest.raises(stripe.error.APIConnectionError):
self.make_request("get", self.valid_url, {}, None)
class TestTornadoAsyncHTTPClient:
# :TODO: Write tests for tornado client
pass
class TestAPIEncode(StripeClientTestCase):
def test_encode_dict(self):
body = {"foo": {"dob": {"month": 1}, "name": "bat"}}
values = [t for t in stripe.api_requestor._api_encode(body)]
assert ("foo[dob][month]", 1) in values
assert ("foo[name]", "bat") in values
def test_encode_array(self):
body = {"foo": [{"dob": {"month": 1}, "name": "bat"}]}
values = [t for t in stripe.api_requestor._api_encode(body)]
assert ("foo[0][dob][month]", 1) in values
assert ("foo[0][name]", "bat") in values
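# --- Hedged sketch (not part of the original source) ---
# The retry tests above pin down the backoff schedule the client is expected to use:
# exponential growth from INITIAL_DELAY, capped at MAX_DELAY, with jitter applied on
# top. Restated standalone with the constants assumed by mock_max_delay(10):
INITIAL_DELAY, MAX_DELAY = 0.5, 10
expected = [min(INITIAL_DELAY * 2 ** n, MAX_DELAY) for n in range(5)]
assert expected == [0.5, 1.0, 2.0, 4.0, 8.0]  # matches test_sleep_time_exponential_back_off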
| 33.968
| 87
| 0.637777
| 1,559
| 12,738
| 4.939064
| 0.167415
| 0.050649
| 0.054026
| 0.033766
| 0.505974
| 0.457922
| 0.392468
| 0.359221
| 0.308182
| 0.277792
| 0
| 0.019128
| 0.265348
| 12,738
| 374
| 88
| 34.058824
| 0.803697
| 0.040038
| 0
| 0.292593
| 0
| 0
| 0.060735
| 0.006303
| 0
| 0
| 0
| 0.002674
| 0.137037
| 1
| 0.118519
| false
| 0.007407
| 0.033333
| 0.007407
| 0.218519
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a1ba6256767aa29fb3040084aca24a7cb8fa6a0
| 1,685
|
py
|
Python
|
http/static/jsonvis.py
|
cheeseywhiz/cheeseywhiz
|
51f6651ddbaeebd14d9ce77776bc4cf3a95511c4
|
[
"MIT"
] | null | null | null |
http/static/jsonvis.py
|
cheeseywhiz/cheeseywhiz
|
51f6651ddbaeebd14d9ce77776bc4cf3a95511c4
|
[
"MIT"
] | null | null | null |
http/static/jsonvis.py
|
cheeseywhiz/cheeseywhiz
|
51f6651ddbaeebd14d9ce77776bc4cf3a95511c4
|
[
"MIT"
] | null | null | null |
"""\
Provides an HTML file visualization of a JSON dataset.
"""
import json
import subprocess
class JsonVis:
def _open_list(self):
self.instructions.append(('open_list', None))
def _list_item(self, data):
self.instructions.append(('list_item', str(data)))
def _horiz_rule(self):
self.instructions.append(('horiz_rule', None))
def _close_list(self):
self.instructions.append(('close_list', None))
def _iterate(self, data: iter):
if isinstance(data, dict):
for key, value in data.items():
self._iterate(key)
self._open_list()
self._iterate(value)
self._close_list()
elif isinstance(data, list):
self._open_list()
for item in data:
self._iterate(item)
self._horiz_rule()
self._close_list()
else:
self._list_item(data)
def download(self, url: str):
"""
        Store a Python dictionary generated from the JSON data at <url> in
        self.data. Returns self.
"""
data = subprocess.run(
f"curl '{url}'", # Quotes required around url for URL parameters
stdout=subprocess.PIPE,
shell=True
).stdout
self.data = json.loads(data)
return self
def make_instructions(self):
"""
        Take self.data and return a list of instructions describing its HTML
        visualization, to be parsed by json.html.
"""
self.instructions = []
self._open_list()
self._iterate(self.data)
self._close_list()
return self.instructions
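# --- Hedged usage sketch (not part of the original source) ---
# Chains the two public methods above. The URL is a placeholder; download() shells
# out to curl, so curl must be available and the URL must return JSON.
viz = JsonVis()
instructions = viz.download('https://example.com/data.json').make_instructions()
for opcode, payload in instructions:
    print(opcode, payload)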
| 28.083333
| 77
| 0.570326
| 192
| 1,685
| 4.828125
| 0.354167
| 0.06041
| 0.09493
| 0.084142
| 0.114347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.32819
| 1,685
| 59
| 78
| 28.559322
| 0.818905
| 0.173294
| 0
| 0.15
| 0
| 0
| 0.037821
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.175
| false
| 0
| 0.05
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a1ba6be1f357556fe2a856981f28ab99cb28a6a
| 1,104
|
py
|
Python
|
sim2d_game_analyzer/MainWindow.py
|
goncamateus/sim2d_game_analyzer
|
3e264df75896b8856163478535fdeeeef2d66b2f
|
[
"MIT"
] | 1
|
2020-06-16T05:53:24.000Z
|
2020-06-16T05:53:24.000Z
|
sim2d_game_analyzer/MainWindow.py
|
goncamateus/sim2d_game_analyzer
|
3e264df75896b8856163478535fdeeeef2d66b2f
|
[
"MIT"
] | null | null | null |
sim2d_game_analyzer/MainWindow.py
|
goncamateus/sim2d_game_analyzer
|
3e264df75896b8856163478535fdeeeef2d66b2f
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5 import QtGui
from PyQt5.QtCore import QEvent, QPoint, Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import (QApplication, QDialog, QGroupBox, QMainWindow,
QTabWidget, QVBoxLayout, QWidget)
from sim2d_game_analyzer.fmdb_tab import FMDBTab
class MainWindow(QMainWindow):
title = "Sim2d Game Analyzer"
top = 500
left = 100
width = 70*4
height = 130*4
def __init__(self):
QMainWindow.__init__(self)
self.setGeometry(self.screen().geometry())
self.setWindowTitle(self.title)
self.setWindowIcon(QIcon("sim2d_game_analyzer/figures/icon.png"))
vbox = QVBoxLayout()
tabWidget = QTabWidget()
tabWidget.setFont(QtGui.QFont("Sanserif", 12))
self.fmdb_tab = FMDBTab()
tabWidget.addTab(self.fmdb_tab, FMDBTab.NAME)
vbox.addWidget(tabWidget)
wid = QWidget(self)
self.setCentralWidget(wid)
wid.setLayout(vbox)
if __name__ == "__main__":
app = QApplication(sys.argv)
mainwindow = MainWindow()
sys.exit(app.exec())
| 27.6
| 75
| 0.663043
| 124
| 1,104
| 5.717742
| 0.524194
| 0.050776
| 0.071932
| 0.050776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026128
| 0.237319
| 1,104
| 39
| 76
| 28.307692
| 0.815914
| 0
| 0
| 0
| 0
| 0
| 0.064312
| 0.032609
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.193548
| 0
| 0.419355
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a1bf05862b9f835d8a239dbc4e6161e02b46036
| 12,543
|
py
|
Python
|
cmd/extractor.py
|
Grammarian/sicle
|
94d826477d269c4c3534d83fa2e940de1d923140
|
[
"Apache-2.0"
] | null | null | null |
cmd/extractor.py
|
Grammarian/sicle
|
94d826477d269c4c3534d83fa2e940de1d923140
|
[
"Apache-2.0"
] | null | null | null |
cmd/extractor.py
|
Grammarian/sicle
|
94d826477d269c4c3534d83fa2e940de1d923140
|
[
"Apache-2.0"
] | null | null | null |
# pip install openpyxl
# pip install cuid
import os.path
import json
import datetime
from openpyxl import load_workbook
import cuid # https://github.com/necaris/cuid.py - create uuid's in the format that graphcool expects
SOURCE_XLSX = "./data/CLP_combined.xlsx"
EXTRACT_OUTPUT_DIR = "../server/extract"
SCHOOL_TITLES = ["ORGANISATION_ID", "ORGANISATION_NAME", "ORG_ELECTORATE", "P_ADDRESS1", "P_SUBURB", "P_STATE",
"P_POSTCODE", "S_ADDRESS1", "S_SUBURB", "S_STATE", "S_POSTCODE", "SCHOOL_NAME", "SCH_ELECTORATE",
"SCHOOL_ID", "SCHOOL_P_ADDRESS1",
"SCHOOL_P_SUBURB", "SCHOOL_P_STATE", "SCHOOL_P_POSTCODE", "SCHOOL_S_ADDRESS1", "SCHOOL_S_SUBURB",
"SCHOOL_S_STATE", "SCHOOL_S_POSTCODE", "LOCATION_NAME", "LOC_ELECTORATE", "LOC_S_ADDRESS1",
"LOC_S_SUBURB", "LOC_S_STATE", "LOC_S_POSTCODE"]
ORGANISATION_FIELDS = {"ORGANISATION_ID": "CLP_ORGANISATION_ID", "ORGANISATION_NAME": "NAME",
"ORG_ELECTORATE": "ELECTORATE", "S_ADDRESS1": "ADDRESS", "S_SUBURB": "SUBURB",
"S_STATE": "STATE", "S_POSTCODE": "POSTCODE", }
SCHOOL_FIELDS = {"SCHOOL_NAME": "NAME", "SCH_ELECTORATE": "ELECTORATE", "SCHOOL_ID": "CLP_SCHOOL_ID",
"ORGANISATION_ID": "CLP_ORGANISATION_ID",
"SCHOOL_S_ADDRESS1": "ADDRESS", "SCHOOL_S_SUBURB": "SUBURB", "SCHOOL_S_STATE": "STATE",
"SCHOOL_S_POSTCODE": "POSTCODE", }
LOCATION_FIELDS = {"LOCATION_NAME": "NAME", "LOC_ELECTORATE": "ELECTORATE", "SCHOOL_ID": "CLP_SCHOOL_ID",
"LOC_S_ADDRESS1": "ADDRESS", "LOC_S_SUBURB": "SUBURB", "LOC_S_STATE": "STATE",
"LOC_S_POSTCODE": "POSTCODE"}
TEACHER_TITLES = ["TEACHER_ID", "ORGANISATION_NAME", "SCHOOL_NAME", "TEACHER_NAME", "TITLE", "LNAME", "FNAME",
"TEACHER_LANGUAGES", "P_ADDRESS1", "P_ADDRESS2", "P_SUBURB", "P_STATE", "P_POSTCODE",
"TELEPHONE", "TEL_EVENING", "EMAIL", "MOBILE", "LEVEL_TAUGHT", "LEVEL_OF_EDUCATION",
"FIELD_OF_EDUCATION", "DEGREE_COUNTRY", "DEGREE_YEAR", "ORGANISATION_ID", "SCHOOL_ID"]
STUDENT_TITLES = ["SCHOOL_NAME", "SCHOOL_ID", "STUDENT_ID", "STUDENT_SRN", "LOCATION_NAME",
"STUDENT_LNAME", "STUDENT_FNAME", "DOB", "TEL", "LOCATION_NAME_1"]
TEACHER_FIELDS = {"TEACHER_ID": "CLP_TEACHER_ID", "ORGANISATION_NAME": "ORGANISATION_NAME",
"SCHOOL_NAME": "SCHOOL_NAME", "TITLE": "TITLE",
"LNAME": "FAMILY_NAME", "FNAME": "GIVEN_NAMES", "TEACHER_LANGUAGES": "LANGUAGES",
"P_ADDRESS1": "ADDRESS1", "P_ADDRESS2": "ADDRESS2", "P_SUBURB": "SUBURB",
"P_STATE": "STATE", "P_POSTCODE": "POSTCODE",
"TELEPHONE": "DAY_PHONE", "TEL_EVENING": "EVENING_PHONE", "EMAIL": "EMAIL", "MOBILE": "MOBILE",
"LEVEL_TAUGHT": "LEVEL_TAUGHT", "LEVEL_OF_EDUCATION": "EDUCATION_LEVEL",
"FIELD_OF_EDUCATION": "EDUCATION_FIELD", "DEGREE_COUNTRY": "EDUCATION_COUNTRY",
"DEGREE_YEAR": "EDUCATION_YEAR",
"ORGANISATION_ID": "ORGANISATION_ID", "SCHOOL_ID": "SCHOOL_ID", }
STUDENT_FIELDS = {"SCHOOL_NAME": "SCHOOL_NAME", "SCHOOL_ID": "SCHOOL_ID", "STUDENT_ID": "CLP_STUDENT_ID",
"STUDENT_SRN": "SRN", "LOCATION_NAME": "LOCATION",
"STUDENT_LNAME": "FAMILY_NAME", "STUDENT_FNAME": "GIVEN_NAMES", "DOB": "DATE_OF_BIRTH",
"TEL": "PHONE", "LOCATION_NAME_1": "DAY_SCHOOL", }
class Sheet:
    "Data container object to hold the contents of one sheet within an Excel spreadsheet"
def __init__(self, name, titles=None, rows=None):
self.name = name
self.titles = titles or []
self.rows = rows or []
def convert_row_to_dict(titles, row):
data = {}
for (i, cell) in enumerate(row):
        if cell.value is not None:
data[titles[i]] = str(cell.value)
return data
def convert_xlsx(xlsx_file):
    """Convert the given XLSX spreadsheet into an iterable of Sheet objects,
    in which each row has been converted into a dictionary."""
work_book = load_workbook(filename=xlsx_file, read_only=True, data_only=True)
for sheet in work_book:
rows = [x for x in sheet.iter_rows()]
if rows:
titles = [cell.value for cell in rows[0]]
dicts = [convert_row_to_dict(titles, row) for row in rows[1:]]
yield Sheet(sheet.title, titles, dicts)
else:
yield Sheet(sheet.title)
def to_camel(s):
"""Convert an underscored title into camel case. 'PARENT_ORGANISATION_ID' => 'parentOrganisationId'"""
bits = [(x.lower() if i == 0 else x.title())
for (i, x) in enumerate(s.split("_"))]
return "".join(bits)
def relative_to_absolute(relative_path):
path_to_py = os.path.abspath(os.path.dirname(__file__))
return os.path.join(path_to_py, relative_path)
def extract(fields, row_as_dict):
data = {}
for (k, v) in fields.items():
data[to_camel(v)] = row_as_dict[k]
return data
def process_sheet(sheet, titles, field_defns):
if titles != sheet.titles:
print("Sheet doesn't have expected titles:", [(i, x) for (i, x) in enumerate(titles) if x != sheet.titles[i]])
return []
structs = [[extract(defn, x) for x in sheet.rows] for defn in field_defns]
return structs
def unique(key, dicts):
t = {x[key]: x for x in dicts}
return t.values()
def now_as_iso8601():
return datetime.datetime.now().replace(microsecond=0).isoformat() + "Z"
def inject_required(type_name, dicts):
    "Inject the required fields that the graphcool import expects"
for x in dicts:
x["_typeName"] = type_name
x["id"] = cuid.cuid()
x["createdAt"] = x["updatedAt"] = now_as_iso8601()
return list(dicts)
def prepare_organisations(organisations):
unique_orgs = unique("clpOrganisationId", organisations)
fat_orgs = inject_required("ClpOrganisation", unique_orgs)
return fat_orgs
def prepare_schools(schools):
uniques = unique("clpSchoolId", schools)
injected = inject_required("ClpSchool", uniques)
return injected
def prepare_locations(locations):
    # There are multiple locations, each of which is identical except for being related to a different school.
# We have to collect all the schools that meet at the same location.
uniques = {}
for x in locations:
# get an existing location with the given name, or add the new location
location = uniques.setdefault(x["name"], x)
related_schools = location.setdefault("schools", list())
related_schools.append(x.pop("clpSchoolId"))
injected = inject_required("ClpLocation", uniques.values())
# FIX THIS - Current extract doesn't include the CLP location id :( Make one up for the time being
for x in injected:
x["clpLocationId"] = cuid.cuid()
return injected
def convert_dob_to_datetime(s):
    "Convert a date string in DD/Mon/YY format to an ISO date"
dt = datetime.datetime.strptime(s, "%d/%b/%y")
return dt.isoformat() + ".0Z" # GraphCool import insists on microseconds, hence the ".0"
def prepare_students(students):
uniques = unique("clpStudentId", students)
injected = inject_required("ClpStudent", uniques)
for x in injected:
x["dateOfBirth"] = convert_dob_to_datetime(x["dateOfBirth"])
return injected
def prepare_teachers(teachers):
# Like locations, the same teacher can have multiple records,
    # each of which is identical except for being related to a different school.
# We have to collect all the schools that the same teacher is teaching at.
uniques = {}
for x in teachers:
# get an existing teacher with that id, or add the new teacher record
teacher = uniques.setdefault(x["clpTeacherId"], x)
related_schools = teacher.setdefault("schools", list())
related_schools.append(x.pop("schoolId"))
injected = inject_required("ClpTeacher", uniques.values())
return injected
def extract_from_xlsx(file_path):
for sheet in convert_xlsx(file_path):
if sheet.name == "SCHOOL-ORG":
(organisations, schools, locations) = process_sheet(
sheet, SCHOOL_TITLES, [ORGANISATION_FIELDS, SCHOOL_FIELDS, LOCATION_FIELDS])
elif sheet.name == "Teacher":
(teachers, ) = process_sheet(sheet, TEACHER_TITLES, [TEACHER_FIELDS])
elif sheet.name == "Student":
(students, ) = process_sheet(sheet, STUDENT_TITLES, [STUDENT_FIELDS])
else:
print("Ignoring sheet:", sheet.name)
return (organisations, schools, locations, teachers, students)
def copy_without(dicts, *keys_to_remove):
    "Return an iterable that contains copies of the given dictionaries with all the given keys removed"
copies = [x.copy() for x in dicts]
for d in copies:
for to_remove in keys_to_remove:
d.pop(to_remove, None)
return copies
def write_nodes(*list_of_lists):
for (i, one_list) in enumerate(list_of_lists):
nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + str(i), "nodes"))
os.makedirs(nodes_dir, exist_ok=True)
path = os.path.join(nodes_dir, "1.json")
with open(path, "w") as f:
nodes = {
"valueType": "nodes",
"values": one_list
}
f.write(json.dumps(nodes))
def write_relations(list_of_lists):
for (i, one_list) in enumerate(list_of_lists):
nodes_dir = relative_to_absolute(os.path.join(EXTRACT_OUTPUT_DIR + "-relations" + str(i), "relations"))
os.makedirs(nodes_dir, exist_ok=True)
path = os.path.join(nodes_dir, "1.json")
with open(path, "w") as f:
nodes = {
"valueType": "relations",
"values": list(one_list)
}
f.write(json.dumps(nodes))
def chunks(n, l):
"""Yield n successive similar-sized chunks from l."""
chunk_size = 1 + len(l) // n
for i in range(0, len(l), chunk_size):
yield l[i:i + chunk_size]
def prepare(raw_organisations, raw_schools, raw_locations, raw_teachers, raw_students):
return (
prepare_organisations(raw_organisations),
prepare_schools(raw_schools),
prepare_locations(raw_locations),
prepare_teachers(raw_teachers),
prepare_students(raw_students)
)
def make_relation(entity1, id1, field1, entity2, id2, field2):
return [
{"_typeName": entity1, "id": id1, "fieldName": field1},
{"_typeName": entity2, "id": id2, "fieldName": field2}
]
def generate_relations(organisations, schools, locations, teachers, students):
# Build school -> organisation relations
org_keys = {x["clpOrganisationId"]: x["id"] for x in organisations}
yield [make_relation("ClpOrganisation", org_keys[x["clpOrganisationId"]], "schools",
"ClpSchool", x["id"], "organisation") for x in schools]
# Build location -> school relations
school_keys = {x["clpSchoolId"]: x["id"] for x in schools}
yield [make_relation("ClpLocation", location["id"], "schools",
"ClpSchool", school_keys[schoolId], "locations")
for location in locations for schoolId in location.get("schools", [])]
# Build teacher -> school relations
yield [make_relation("ClpTeacher", teacher["id"], "schools",
"ClpSchool", school_keys[schoolId], "teachers")
for teacher in teachers for schoolId in teacher.get("schools", [])]
# Build student -> school relations
yield [make_relation("ClpStudent", student["id"], "school",
"ClpSchool", school_keys[student["schoolId"]], "students")
for student in students if student["schoolId"] in school_keys]
def main():
xlsx_path = relative_to_absolute(SOURCE_XLSX)
raw_collections = extract_from_xlsx(xlsx_path)
(organisations, schools, locations, teachers, students) = prepare(*raw_collections)
write_nodes(
organisations,
copy_without(schools, "clpOrganisationId"),
copy_without(locations, "schools"),
copy_without(teachers, "organisationId", "organisationName", "schools", "schoolName"),
*chunks(3, copy_without(students, "schoolId", "schoolName", "location")))
write_relations(generate_relations(organisations, schools, locations, teachers, students))
if __name__ == "__main__":
main()
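# --- Hedged usage sketch (not part of the original extractor) ---
# A minimal illustration of the copy_without/chunks helpers defined above,
# using made-up records; the field names here are hypothetical.
def _demo_helpers():
    records = [{"id": i, "schoolId": "s%d" % i} for i in range(5)]
    stripped = copy_without(records, "schoolId")   # copies with the key removed
    assert all("schoolId" not in d for d in stripped)
    assert all("schoolId" in d for d in records)   # originals are untouched
    return list(chunks(2, stripped))               # at most 2 similar-sized chunks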
| 42.090604
| 118
| 0.64817
| 1,539
| 12,543
| 5.050682
| 0.192982
| 0.005403
| 0.009263
| 0.01904
| 0.192976
| 0.138942
| 0.117587
| 0.091599
| 0.072302
| 0.072302
| 0
| 0.005038
| 0.224508
| 12,543
| 297
| 119
| 42.232323
| 0.794078
| 0.119349
| 0
| 0.12037
| 0
| 0
| 0.259518
| 0.002125
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115741
| false
| 0
| 0.027778
| 0.013889
| 0.236111
| 0.009259
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a1c12c2f6792d992cfb44ac67b60bca865f920c
| 6,148
|
py
|
Python
|
fHDHR/origin/origin_channels.py
|
crackers8199/fHDHR_USTVGO
|
50e284fe004c8b60b07dbe29fa3fb4f69a7b3cfa
|
[
"WTFPL"
] | null | null | null |
fHDHR/origin/origin_channels.py
|
crackers8199/fHDHR_USTVGO
|
50e284fe004c8b60b07dbe29fa3fb4f69a7b3cfa
|
[
"WTFPL"
] | null | null | null |
fHDHR/origin/origin_channels.py
|
crackers8199/fHDHR_USTVGO
|
50e284fe004c8b60b07dbe29fa3fb4f69a7b3cfa
|
[
"WTFPL"
] | null | null | null |
import os
import sys
from lxml import html
import pathlib
import json
import m3u8
from seleniumwire import webdriver
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.firefox.options import Options as FirefoxOptions
IFRAME_CSS_SELECTOR = '.iframe-container>iframe'
# Disable
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore
def enablePrint():
sys.stdout = sys.__stdout__
class OriginChannels():
def __init__(self, fhdhr, origin):
self.fhdhr = fhdhr
self.origin = origin
self.cache_dir = self.fhdhr.config.dict["filedir"]["epg_cache"]["origin"]["top"]
self.m3ucache = pathlib.Path(self.cache_dir).joinpath('m3ucache.json')
self.cached_m3u = {}
self.load_m3u_cache()
def load_m3u_cache(self):
if os.path.isfile(self.m3ucache):
self.fhdhr.logger.info("Loading Previously Saved Channel m3u.")
with open(self.m3ucache, 'r') as m3ufile:
self.cached_m3u = json.load(m3ufile)
def save_m3u_cache(self):
self.fhdhr.logger.info("Saving Channel m3u cache.")
with open(self.m3ucache, 'w') as m3ufile:
m3ufile.write(json.dumps(self.cached_m3u, indent=4))
def get_channels(self):
channel_list = []
chan_names, chan_urls = self.scrape_channels()
chan_number_index = 1
for name, url in zip(chan_names, chan_urls):
chan_dict = {
"name": name.rstrip(),
"number": chan_number_index,
"callsign": self.format_callsign(url),
}
channel_list.append(chan_dict)
chan_number_index += 1
return channel_list
def get_channel_stream(self, chandict, allchandict):
caching = True
streamlist = []
streamdict = {}
if chandict["callsign"] in list(self.cached_m3u):
streamurl = self.cached_m3u[chandict["callsign"]]
else:
streamurl = self.get_ustvgo_stream(chandict)
# if self.fhdhr.config.dict["origin"]["force_best"]:
streamurl = self.m3u8_beststream(streamurl)
streamdict = {"number": chandict["number"], "stream_url": streamurl}
streamlist.append(streamdict)
return streamlist, caching
def m3u8_beststream(self, m3u8_url):
bestStream = None
videoUrlM3u = m3u8.load(m3u8_url)
if not videoUrlM3u.is_variant:
return m3u8_url
for videoStream in videoUrlM3u.playlists:
if not bestStream:
bestStream = videoStream
elif videoStream.stream_info.bandwidth > bestStream.stream_info.bandwidth:
bestStream = videoStream
        if bestStream:
            return bestStream.absolute_uri
        else:
            return m3u8_url
def scrape_channels(self):
channels_url = "https://ustvgo.tv/"
chanpage = self.fhdhr.web.session.get(channels_url)
tree = html.fromstring(chanpage.content)
channel_names_xpath = "/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/text()"
channel_urls_xpath = "/html/body/div[1]/div[1]/div/div[2]/div/div/div/article/div[1]/ol/li[*]/strong/a/@href"
chan_names = tree.xpath(channel_names_xpath)
chan_urls = tree.xpath(channel_urls_xpath)
return chan_names, chan_urls
def format_callsign(self, url):
callsign = (url
.split('/')[-2]
.replace('-live', '')
.replace('-channel', '')
.replace('-free', '')
.replace('-streaming', ''))
return callsign
def get_ustvgo_stream(self, chandict):
driver = self.get_firefox_driver()
blockPrint()
driver.get("https://ustvgo.tv/" + chandict["callsign"])
enablePrint()
# Get iframe
iframe = None
try:
iframe = driver.find_element_by_css_selector(IFRAME_CSS_SELECTOR)
except NoSuchElementException:
self.fhdhr.logger.error('Video frame is not found for channel')
return None
# Detect VPN-required channels
try:
driver.switch_to.frame(iframe)
driver.find_element_by_xpath("//*[text()='This channel requires our VPN to watch!']")
need_vpn = True
except NoSuchElementException:
need_vpn = False
finally:
driver.switch_to.default_content()
if need_vpn:
self.fhdhr.logger.warning('Channel needs VPN to be grabbed.')
return None
# Autoplay
iframe.click()
try:
playlist = driver.wait_for_request('/playlist.m3u8', timeout=10)
except TimeoutException:
self.fhdhr.logger.error('Channel m3u8 not found.')
return None
streamurl = str(playlist)
driver.close()
driver.quit()
self.cached_m3u[chandict["callsign"]] = streamurl
self.save_m3u_cache()
return streamurl
def get_firefox_driver(self):
ff_options = FirefoxOptions()
ff_options.add_argument('--headless')
firefox_profile = webdriver.FirefoxProfile()
firefox_profile.set_preference('permissions.default.image', 2)
firefox_profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
firefox_profile.set_preference('dom.disable_beforeunload', True)
firefox_profile.set_preference('browser.tabs.warnOnClose', False)
firefox_profile.set_preference('media.volume_scale', '0.0')
set_seleniumwire_options = {
'connection_timeout': None,
'verify_ssl': False,
'suppress_connection_errors': True
}
driver = webdriver.Firefox(seleniumwire_options=set_seleniumwire_options, options=ff_options, firefox_profile=firefox_profile)
return driver
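# --- Hedged sketch (not part of the original plugin) ---
# Illustrates the "pick the highest-bandwidth variant" logic that
# m3u8_beststream implements, on an already-loaded m3u8 playlist object.
# `variant_playlist` and `fallback_url` are hypothetical inputs.
def pick_best_variant(variant_playlist, fallback_url):
    if not variant_playlist.is_variant:
        return fallback_url
    best = None
    for stream in variant_playlist.playlists:
        if best is None or stream.stream_info.bandwidth > best.stream_info.bandwidth:
            best = stream
    return best.absolute_uri if best is not None else fallback_url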
| 33.78022
| 134
| 0.609141
| 666
| 6,148
| 5.430931
| 0.304805
| 0.024883
| 0.021565
| 0.037324
| 0.089301
| 0.032624
| 0.032624
| 0.032624
| 0.032624
| 0.032624
| 0
| 0.01393
| 0.287736
| 6,148
| 181
| 135
| 33.966851
| 0.812058
| 0.018705
| 0
| 0.116788
| 0
| 0.014599
| 0.132592
| 0.055924
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087591
| false
| 0
| 0.065693
| 0
| 0.248175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a1e22f8f6e931aec05c9d718e0438f67bfcceaf
| 6,950
|
py
|
Python
|
funcx_endpoint/funcx_endpoint/strategies/base.py
|
arokem/funcX
|
bd45b93f6c5a1676735b6f8246312d6b468a4b20
|
[
"Apache-1.1"
] | 1
|
2021-01-18T21:36:22.000Z
|
2021-01-18T21:36:22.000Z
|
funcx_endpoint/funcx_endpoint/strategies/base.py
|
Loonride/funcX
|
95ae788eac14397a5ec042f0a2ad05c14030b807
|
[
"Apache-1.1"
] | null | null | null |
funcx_endpoint/funcx_endpoint/strategies/base.py
|
Loonride/funcX
|
95ae788eac14397a5ec042f0a2ad05c14030b807
|
[
"Apache-1.1"
] | null | null | null |
import sys
import threading
import logging
import time
logger = logging.getLogger("interchange.strategy.base")
class BaseStrategy(object):
"""Implements threshold-interval based flow control.
    The overall goal is to trap the flow of apps from the
    workflow, measure it, and redirect it to the appropriate executors for
    processing.
This is based on the following logic:
.. code-block:: none
BEGIN (INTERVAL, THRESHOLD, callback) :
start = current_time()
while (current_time()-start < INTERVAL) :
count = get_events_since(start)
if count >= THRESHOLD :
break
callback()
This logic ensures that the callbacks are activated with a maximum delay
of `interval` for systems with infrequent events as well as systems which would
generate large bursts of events.
Once a callback is triggered, the callback generally runs a strategy
    method on the sites available as well as the task queue.
TODO: When the debug logs are enabled this module emits duplicate messages.
This issue needs more debugging. What I've learnt so far is that the duplicate
messages are present only when the timer thread is started, so this could be
from a duplicate logger being added by the thread.
"""
def __init__(self, *args, threshold=20, interval=5):
"""Initialize the flowcontrol object.
We start the timer thread here
Parameters
----------
- threshold (int) : Tasks after which the callback is triggered
- interval (int) : seconds after which timer expires
"""
self.interchange = None
self.threshold = threshold
self.interval = interval
self.cb_args = args
self.callback = self.strategize
self._handle = None
self._event_count = 0
self._event_buffer = []
self._wake_up_time = time.time() + 1
self._kill_event = threading.Event()
self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
self._thread.daemon = True
def start(self, interchange):
"""Actually start the strategy
Parameters
----------
interchange: funcx.executors.high_throughput.interchange.Interchange
Interchange to bind the strategy to
"""
self.interchange = interchange
if hasattr(interchange.config, 'provider'):
logger.debug("Strategy bounds-> init:{}, min:{}, max:{}".format(
interchange.config.provider.init_blocks,
interchange.config.provider.min_blocks,
interchange.config.provider.max_blocks))
self._thread.start()
    def strategize(self, *args, **kwargs):
        """ Strategize is called every time the threshold or the interval is hit
"""
logger.debug("Strategize called with {} {}".format(args, kwargs))
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
"""
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind='timer')
else:
print("Sleeping a bit more")
def notify(self, event_id):
"""Let the FlowControl system know that there is an event.
This method is to be called from the Interchange to notify the flowcontrol
"""
self._event_buffer.extend([event_id])
self._event_count += 1
if self._event_count >= self.threshold:
logger.debug("Eventcount >= threshold")
self.make_callback(kind="event")
def make_callback(self, kind=None):
"""Makes the callback and resets the timer.
KWargs:
- kind (str): Default=None, used to pass information on what
triggered the callback
"""
self._wake_up_time = time.time() + self.interval
self.callback(tasks=self._event_buffer, kind=kind)
self._event_buffer = []
def close(self):
"""Merge the threads and terminate."""
self._kill_event.set()
self._thread.join()
class Timer(object):
"""This timer is a simplified version of the FlowControl timer.
This timer does not employ notify events.
This is based on the following logic :
.. code-block:: none
BEGIN (INTERVAL, THRESHOLD, callback) :
start = current_time()
while (current_time()-start < INTERVAL) :
wait()
break
callback()
"""
def __init__(self, callback, *args, interval=5):
"""Initialize the flowcontrol object
We start the timer thread here
Args:
- dfk (DataFlowKernel) : DFK object to track parsl progress
KWargs:
- threshold (int) : Tasks after which the callback is triggered
- interval (int) : seconds after which timer expires
"""
self.interval = interval
self.cb_args = args
self.callback = callback
self._wake_up_time = time.time() + 1
self._kill_event = threading.Event()
self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
self._thread.daemon = True
self._thread.start()
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is called
Args:
- kill_event (threading.Event) : Event to wait on
"""
# Sleep till time to wake up
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind='timer')
else:
print("Sleeping a bit more")
def make_callback(self, kind=None):
"""Makes the callback and resets the timer.
"""
self._wake_up_time = time.time() + self.interval
self.callback(*self.cb_args)
def close(self):
"""Merge the threads and terminate.
"""
self._kill_event.set()
self._thread.join()
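# --- Hedged sketch (not part of the funcX source) ---
# A single-threaded rendition of the threshold/interval logic described in the
# BaseStrategy docstring: fire the callback once THRESHOLD events have arrived
# or INTERVAL seconds have elapsed, whichever comes first. `poll_new_events`
# is a hypothetical function returning the number of new events.
def run_flow_control(poll_new_events, callback, threshold=20, interval=5):
    start = time.time()
    count = 0
    while time.time() - start < interval:
        count += poll_new_events()
        if count >= threshold:
            break
        time.sleep(0.1)  # avoid a busy loop
    callback()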
| 32.325581
| 93
| 0.608633
| 841
| 6,950
| 4.895363
| 0.254459
| 0.018946
| 0.02429
| 0.027204
| 0.50838
| 0.50838
| 0.50838
| 0.504494
| 0.504494
| 0.483119
| 0
| 0.002077
| 0.307338
| 6,950
| 214
| 94
| 32.476636
| 0.853137
| 0.457266
| 0
| 0.589744
| 0
| 0
| 0.053842
| 0.007562
| 0
| 0
| 0
| 0.004673
| 0
| 1
| 0.141026
| false
| 0
| 0.051282
| 0
| 0.24359
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a1e3877d30a492ceb0b5445e7d1d835bd228d55
| 7,409
|
py
|
Python
|
hw3 cnn and vis/gradcam.py
|
mtang1001/ML-Exploration
|
6fec422eca127210e948945e6d15526947bfae8e
|
[
"Apache-2.0"
] | null | null | null |
hw3 cnn and vis/gradcam.py
|
mtang1001/ML-Exploration
|
6fec422eca127210e948945e6d15526947bfae8e
|
[
"Apache-2.0"
] | null | null | null |
hw3 cnn and vis/gradcam.py
|
mtang1001/ML-Exploration
|
6fec422eca127210e948945e6d15526947bfae8e
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torchvision
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
from captum.attr import GuidedGradCam, GuidedBackprop
from captum.attr import LayerActivation, LayerConductance, LayerGradCam
from data_utils import *
from image_utils import *
from captum_utils import *
import numpy as np
from visualizers import GradCam
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
X, y, class_names = load_imagenet_val(num=5)
# FOR THIS SECTION ONLY, we need gradients, so we introduce a separate model used explicitly for GradCAM.
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
gc = GradCam()
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
# Guided Back-Propagation
gbp_result = gc.guided_backprop(X_tensor,y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gbp_result.shape[0]):
plt.subplot(1, 5, i + 1)
img = gbp_result[i]
img = rescale(img)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_backprop.png')
# GradCam
# GradCAM. We have given you the module (=layer) that we need to capture gradients from; see the conv_module variable below.
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
for param in gc_model.parameters():
param.requires_grad = True
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
gradcam_val = gradcam_result[i]
img = X[i] + (matplotlib.cm.jet(gradcam_val)[:,:,:3]*255)
img = img / np.max(img)
plt.subplot(1, 5, i + 1)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/gradcam.png')
# As a final step, we can combine GradCam and Guided Backprop to get Guided GradCam.
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)
gbp_result = gc.guided_backprop(X_tensor, y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
gbp_val = gbp_result[i]
gradcam_val = np.expand_dims(gradcam_result[i], axis=2)
# Pointwise multiplication and normalization of the gradcam and guided backprop results (2 lines)
img = gradcam_val * gbp_val
img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
img = np.float32(img)
img = torch.from_numpy(img)
img = deprocess(img)
plt.subplot(1, 5, i + 1)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_gradcam.png')
# **************************************************************************************** #
# Captum
model = torchvision.models.squeezenet1_1(pretrained=True)
# We don't want to train the model, so tell PyTorch not to compute gradients
# with respect to model parameters.
for param in model.parameters():
param.requires_grad = False
# Convert X and y from numpy arrays to Torch Tensors
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)
y_tensor = torch.LongTensor(y)
conv_module = model.features[12]
##############################################################################
# TODO: Compute/Visualize GuidedBackprop and Guided GradCAM as well. #
# visualize_attr_maps function from captum_utils.py is useful for #
# visualizing captum outputs #
# Use conv_module as the convolution layer for gradcam #
##############################################################################
# Computing Guided GradCam
ggc = GuidedGradCam(model, conv_module)
attribution_gcc = compute_attributions(ggc, X_tensor, target = y_tensor)
# print(X_tensor.shape, y_tensor.shape, attribution_gcc.shape)
visualize_attr_maps('visualization/GuidedGradCam.png', X, y, class_names, [attribution_gcc], ['Guided_Grad_Cam'])
# Computing Guided BackProp
gbp = GuidedBackprop(model)
attribution_gbp = compute_attributions(gbp, X_tensor, target = y_tensor)
visualize_attr_maps('visualization/GuidedBackpropCam.png', X, y, class_names, [attribution_gbp], ['Guided_Backprop_Cam'])
##############################################################################
# END OF YOUR CODE #
##############################################################################
# Try out different layers and see observe how the attributions change
layer = model.features[3]
# Example visualization for using layer visualizations
# layer_act = LayerActivation(model, layer)
# layer_act_attr = compute_attributions(layer_act, X_tensor)
# layer_act_attr_sum = layer_act_attr.mean(axis=1, keepdim=True)
##############################################################################
# TODO: Visualize Individual Layer Gradcam and Layer Conductance (similar #
# to what we did for the other captum sections, using our helper methods), #
# but with some preprocessing calculations. #
# #
# You can refer to the LayerActivation example above and you should be #
# using 'layer' given above for this section #
# #
# Also note that, you would need to customize your 'attr_preprocess' #
# parameter that you send along to 'visualize_attr_maps' as the default #
# 'attr_preprocess' is written to only to handle multi channel attributions. #
# #
# For layer gradcam look at the usage of the parameter relu_attributions #
##############################################################################
# Layer gradcam aggregates across all channels
from captum.attr import LayerAttribution
N, C, H, W = X_tensor.shape
LC = LayerConductance(model, layer)
LC_attr = compute_attributions(LC, X_tensor, target = y_tensor)
LC_attr_sum = LC_attr.mean(axis = 1, keepdim = True)
LC_attr_int = LayerAttribution.interpolate(LC_attr_sum, (H,W) )
LC_attr_int = LC_attr_int.repeat(1, 3, 1, 1)
visualize_attr_maps('visualization/LayerConductance.png', X, y, class_names, [LC_attr_int], ['LayerConductance'])
LGC = LayerGradCam(model, layer)
LGC_attr = compute_attributions(LGC, X_tensor, target = y_tensor)
LGC_attr_sum = LGC_attr.mean(axis = 1, keepdim = True)
LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H,W))
LGC_attr_int = LGC_attr_int.repeat(1, 3, 1, 1)
visualize_attr_maps ('visualization/LayerGradCam.png', X, y, class_names, [LGC_attr_int], ['LayerGradCam'])
##############################################################################
# END OF YOUR CODE #
##############################################################################
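# --- Hedged sketch (not part of the assignment code) ---
# Illustrates the pointwise Guided GradCAM combination used above on dummy
# arrays: an (H, W) GradCAM heatmap is broadcast against an (H, W, 3) guided
# backprop map and the product is rescaled to [0, 1] for display.
def combine_guided_gradcam(gradcam_map, gbp_map):
    cam = np.expand_dims(gradcam_map, axis=2)   # (H, W) -> (H, W, 1)
    combined = cam * gbp_map                    # broadcast over the 3 channels
    combined = combined - combined.min()
    peak = combined.max()
    return combined / peak if peak > 0 else combined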
| 41.623596
| 137
| 0.626535
| 948
| 7,409
| 4.728903
| 0.251055
| 0.023422
| 0.022753
| 0.013384
| 0.337497
| 0.293553
| 0.26277
| 0.252063
| 0.229757
| 0.229757
| 0
| 0.01096
| 0.187205
| 7,409
| 177
| 138
| 41.858757
| 0.733477
| 0.341342
| 0
| 0.351064
| 0
| 0
| 0.082833
| 0.052821
| 0
| 0
| 0
| 0.00565
| 0
| 1
| 0
| false
| 0
| 0.138298
| 0
| 0.138298
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a1eaf6b7e32695b5e6a96b0eee80707d820de35
| 9,462
|
py
|
Python
|
colab_logica.py
|
smdesai/logica
|
ad099bcd6064e38e9c2bc9a99564832857c0768c
|
[
"Apache-2.0"
] | null | null | null |
colab_logica.py
|
smdesai/logica
|
ad099bcd6064e38e9c2bc9a99564832857c0768c
|
[
"Apache-2.0"
] | null | null | null |
colab_logica.py
|
smdesai/logica
|
ad099bcd6064e38e9c2bc9a99564832857c0768c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for using Logica in CoLab."""
from .common import color
from .common import concertina_lib
from .compiler import functors
from .compiler import rule_translate
from .compiler import universe
import IPython
from IPython.core.magic import register_cell_magic
from IPython.display import display
import os
import pandas
from .parser_py import parse
from .common import sqlite3_logica
BQ_READY = True # By default.
try:
from google.cloud import bigquery
except:
BQ_READY = False
print('Could not import google.cloud.bigquery.')
try:
from google.colab import auth
except:
BQ_READY = False
  print('Could not import google.colab.auth.')
try:
from google.colab import widgets
WIDGETS_IMPORTED = True
except:
WIDGETS_IMPORTED = False
print('Could not import google.colab.widgets.')
PROJECT = None
# TODO: Should this be renamed to PSQL_ENGINE, PSQL_CONNECTION?
DB_ENGINE = None
DB_CONNECTION = None
USER_AUTHENTICATED = False
TABULATED_OUTPUT = True
SHOW_FULL_QUERY = True
PREAMBLE = None
def SetPreamble(preamble):
global PREAMBLE
PREAMBLE = preamble
def SetProject(project):
global PROJECT
PROJECT = project
def SetDbConnection(connection):
global DB_CONNECTION
DB_CONNECTION = connection
def EnsureAuthenticatedUser():
global USER_AUTHENTICATED
global PROJECT
if USER_AUTHENTICATED:
return
auth.authenticate_user()
if PROJECT is None:
print("Please enter project_id to use for BigQuery queries.")
PROJECT = input()
print("project_id is set to %s" % PROJECT)
print("You can change it with logica.colab_logica.SetProject command.")
USER_AUTHENTICATED = True
def SetTabulatedOutput(tabulated_output):
global TABULATED_OUTPUT
global SHOW_FULL_QUERY
TABULATED_OUTPUT = tabulated_output
SHOW_FULL_QUERY = TABULATED_OUTPUT
if not WIDGETS_IMPORTED:
SetTabulatedOutput(False)
def TabBar(*args):
"""Returns a real TabBar or a mock. Useful for UIs that don't support JS."""
if TABULATED_OUTPUT:
return widgets.TabBar(*args)
class MockTab:
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, *x):
pass
class MockTabBar:
def __init__(self):
pass
def output_to(self, x):
return MockTab()
return MockTabBar()
@register_cell_magic
def logica(line, cell):
Logica(line, cell, run_query=True)
def ParseList(line):
line = line.strip()
if not line:
predicates = []
else:
predicates = [p.strip() for p in line.split(',')]
return predicates
def RunSQL(sql, engine, connection=None, is_final=False):
if engine == 'bigquery':
client = bigquery.Client(project=PROJECT)
return client.query(sql).to_dataframe()
elif engine == 'psql':
if is_final:
return pandas.read_sql(sql, connection)
else:
return connection.execute(sql)
elif engine == 'sqlite':
statements = parse.SplitRaw(sql, ';')
connection.executescript(sql)
if is_final:
return pandas.read_sql(statements[-1], connection)
else:
pass
return None
else:
raise Exception('Logica only supports BigQuery, PostgreSQL and SQLite '
'for now.')
class SqliteRunner(object):
def __init__(self):
self.connection = sqlite3_logica.SqliteConnect()
# TODO: Sqlite runner should not be accepting an engine.
def __call__(self, sql, engine, is_final):
return RunSQL(sql, engine, self.connection, is_final)
class PostgresRunner(object):
def __init__(self):
global DB_CONNECTION
global DB_ENGINE
if DB_CONNECTION:
self.engine = DB_ENGINE
self.connection = DB_CONNECTION
else:
(self.engine, self.connection) = PostgresJumpStart()
DB_ENGINE = self.engine
DB_CONNECTION = self.connection
def __call__(self, sql, engine, is_final):
return RunSQL(sql, engine, self.connection, is_final)
def ShowError(error_text):
print(color.Format('[ {error}Error{end} ] ' + error_text))
def Logica(line, cell, run_query):
"""Running Logica predicates and storing results."""
predicates = ParseList(line)
if not predicates:
ShowError('No predicates to run.')
return
try:
program = ';\n'.join(s for s in [PREAMBLE, cell] if s)
parsed_rules = parse.ParseFile(program)['rule']
except parse.ParsingException as e:
e.ShowMessage()
return
try:
program = universe.LogicaProgram(parsed_rules)
except functors.FunctorError as e:
e.ShowMessage()
return
engine = program.annotations.Engine()
if engine == 'bigquery' and not BQ_READY:
ShowError(
        'BigQuery client and/or authentication is not installed. \n'
'It is the easiest to run BigQuery requests from Google CoLab:\n'
' https://colab.research.google.com/.\n'
'Note that running Logica on SQLite requires no installation.\n'
'This could be a good fit for working with small data or learning Logica.\n'
'Use {warning}@Engine("sqlite");{end} annotation in your program to use SQLite.')
return
bar = TabBar(predicates + ['(Log)'])
logs_idx = len(predicates)
executions = []
sub_bars = []
ip = IPython.get_ipython()
for idx, predicate in enumerate(predicates):
with bar.output_to(logs_idx):
try:
sql = program.FormattedPredicateSql(predicate)
executions.append(program.execution)
ip.push({predicate + '_sql': sql})
except rule_translate.RuleCompileException as e:
print('Encountered error when compiling %s.' % predicate)
e.ShowMessage()
return
# Publish output to Colab cell.
with bar.output_to(idx):
sub_bar = TabBar(['SQL', 'Result'])
sub_bars.append(sub_bar)
with sub_bar.output_to(0):
if SHOW_FULL_QUERY:
print(
color.Format(
'The following query is stored at {warning}%s{end} '
'variable.' % (
predicate + '_sql')))
print(sql)
else:
print('Query is stored at %s variable.' %
color.Warn(predicate + '_sql'))
with bar.output_to(logs_idx):
if engine == 'sqlite':
sql_runner = SqliteRunner()
elif engine == 'psql':
sql_runner = PostgresRunner()
elif engine == 'bigquery':
EnsureAuthenticatedUser()
sql_runner = RunSQL
else:
raise Exception('Logica only supports BigQuery, PostgreSQL and SQLite '
'for now.')
result_map = concertina_lib.ExecuteLogicaProgram(
executions, sql_runner=sql_runner, sql_engine=engine)
for idx, predicate in enumerate(predicates):
t = result_map[predicate]
ip.push({predicate: t})
with bar.output_to(idx):
with sub_bars[idx].output_to(1):
if run_query:
print(
color.Format(
'The following table is stored at {warning}%s{end} '
'variable.' %
predicate))
display(t)
else:
print('The query was not run.')
print(' ') # To activate the tabbar.
def PostgresJumpStart():
# Install postgresql server.
print("Installing and configuring an empty PostgreSQL database.")
result = 0
result += os.system('sudo apt-get -y -qq update')
result += os.system('sudo apt-get -y -qq install postgresql')
result += os.system('sudo service postgresql start')
# Ignoring user creation error, as they may already exist.
result += 0 * os.system(
'sudo -u postgres psql -c "CREATE USER logica WITH SUPERUSER"')
result += os.system(
'sudo -u postgres psql -c "ALTER USER logica PASSWORD \'logica\';"')
result += os.system(
'sudo -u postgres psql -U postgres -c \'CREATE DATABASE logica;\'')
if result != 0:
print("""Installation failed. Please try the following manually:
# Install Logica.
!pip install logica
# Install postgresql server.
!sudo apt-get -y -qq update
!sudo apt-get -y -qq install postgresql
!sudo service postgresql start
# Prepare database for Logica.
!sudo -u postgres psql -c "CREATE USER logica WITH SUPERUSER"
!sudo -u postgres psql -c "ALTER USER logica PASSWORD 'logica';"
!sudo -u postgres psql -U postgres -c 'CREATE DATABASE logica;'
# Connect to the database.
from logica import colab_logica
from sqlalchemy import create_engine
import pandas
engine = create_engine('postgresql+psycopg2://logica:[email protected]', pool_recycle=3600);
connection = engine.connect();
colab_logica.SetDbConnection(connection)""")
return
print('Installation succeeded. Connecting...')
# Connect to the database.
from logica import colab_logica
from sqlalchemy import create_engine
import pandas
engine = create_engine('postgresql+psycopg2://logica:[email protected]', pool_recycle=3600)
connection = engine.connect()
print('Connected.')
return engine, connection
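# --- Hedged sketch (not part of the Logica source) ---
# Shows the same "execute the script, then read the final statement back as a
# DataFrame" pattern that RunSQL uses for the sqlite engine, against an
# in-memory database with a made-up table.
import sqlite3

def _demo_sqlite_roundtrip():
  connection = sqlite3.connect(':memory:')
  connection.executescript('CREATE TABLE t(x INTEGER); INSERT INTO t VALUES (1), (2);')
  return pandas.read_sql('SELECT * FROM t', connection)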
| 28.672727
| 92
| 0.686007
| 1,208
| 9,462
| 5.25745
| 0.25745
| 0.010077
| 0.011337
| 0.01606
| 0.282475
| 0.240277
| 0.20296
| 0.181074
| 0.160605
| 0.160605
| 0
| 0.005134
| 0.217713
| 9,462
| 329
| 93
| 28.759878
| 0.852878
| 0.106743
| 0
| 0.288538
| 0
| 0.003953
| 0.260518
| 0.02995
| 0
| 0
| 0
| 0.00304
| 0
| 1
| 0.083004
| false
| 0.027668
| 0.106719
| 0.011858
| 0.280632
| 0.071146
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a20c183c03d4133fca24e84a8755331075102c6
| 1,195
|
py
|
Python
|
alibi_detect/utils/tests/test_discretize.py
|
Clusks/alibi-detect
|
b39406a6cf88f315f401562d4fea93a42aa6dcc1
|
[
"ECL-2.0",
"Apache-2.0",
"CC0-1.0"
] | 1,227
|
2019-11-19T15:38:40.000Z
|
2022-03-31T11:18:32.000Z
|
alibi_detect/utils/tests/test_discretize.py
|
Clusks/alibi-detect
|
b39406a6cf88f315f401562d4fea93a42aa6dcc1
|
[
"ECL-2.0",
"Apache-2.0",
"CC0-1.0"
] | 323
|
2019-11-21T18:41:00.000Z
|
2022-03-31T21:08:56.000Z
|
alibi_detect/utils/tests/test_discretize.py
|
Clusks/alibi-detect
|
b39406a6cf88f315f401562d4fea93a42aa6dcc1
|
[
"ECL-2.0",
"Apache-2.0",
"CC0-1.0"
] | 133
|
2019-11-19T14:23:23.000Z
|
2022-03-31T07:55:43.000Z
|
from itertools import product
import numpy as np
import pytest
from alibi_detect.utils.discretizer import Discretizer
x = np.random.rand(10, 4)
n_features = x.shape[1]
feature_names = [str(_) for _ in range(n_features)]
categorical_features = [[], [1, 3]]
percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))]
tests = list(product(categorical_features, percentiles))
n_tests = len(tests)
@pytest.fixture
def cats_and_percentiles(request):
cat, perc = tests[request.param]
return cat, perc
@pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True)
def test_discretizer(cats_and_percentiles):
cat, perc = cats_and_percentiles
disc = Discretizer(x, cat, feature_names, perc)
to_disc = list(disc.names.keys())
assert len(to_disc) == (x.shape[1] - len(cat))
x_disc = disc.discretize(x)
for k, v in disc.names.items():
assert len(v) <= len(perc) + 1
assert callable(disc.lambdas[k])
assert (x_disc[:, k].min() == 0).all()
assert (x_disc[:, k].max() == len(perc)).all()
for i in range(x.shape[1]):
if i not in to_disc:
assert (x_disc[:, i] == x[:, i]).all()
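# --- Hedged sketch (not part of alibi-detect) ---
# A stripped-down version of the indirect-parametrization pattern used above:
# the parametrize index is routed into the fixture through request.param.
_toy_cases = [([], [25, 50, 75]), ([1], [10, 20])]

@pytest.fixture
def toy_case(request):
    return _toy_cases[request.param]

@pytest.mark.parametrize('toy_case', list(range(len(_toy_cases))), indirect=True)
def test_toy_case_shapes(toy_case):
    cats, percs = toy_case
    assert isinstance(cats, list) and len(percs) >= 2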
| 31.447368
| 85
| 0.663598
| 181
| 1,195
| 4.232044
| 0.370166
| 0.036554
| 0.093995
| 0.031332
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024565
| 0.182427
| 1,195
| 37
| 86
| 32.297297
| 0.759468
| 0
| 0
| 0
| 0
| 0
| 0.016736
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a247bd11d82f9ea0cd74cd38836b820c3903839
| 5,048
|
py
|
Python
|
interpretable_ddts/runfiles/gym_runner.py
|
CORE-Robotics-Lab/Interpretable_DDTS_AISTATS2020
|
a7fde4d2a7d70477b2e6c96b140f8c6587f78791
|
[
"MIT"
] | 5
|
2021-08-11T14:58:36.000Z
|
2022-02-12T06:12:19.000Z
|
interpretable_ddts/runfiles/gym_runner.py
|
CORE-Robotics-Lab/Interpretable_DDTS_AISTATS2020
|
a7fde4d2a7d70477b2e6c96b140f8c6587f78791
|
[
"MIT"
] | null | null | null |
interpretable_ddts/runfiles/gym_runner.py
|
CORE-Robotics-Lab/Interpretable_DDTS_AISTATS2020
|
a7fde4d2a7d70477b2e6c96b140f8c6587f78791
|
[
"MIT"
] | 4
|
2020-10-21T03:57:52.000Z
|
2021-06-28T08:08:05.000Z
|
# Created by Andrew Silva on 8/28/19
import gym
import numpy as np
import torch
from interpretable_ddts.agents.ddt_agent import DDTAgent
from interpretable_ddts.agents.mlp_agent import MLPAgent
from interpretable_ddts.opt_helpers.replay_buffer import discount_reward
import torch.multiprocessing as mp
import argparse
import copy
import random
def run_episode(q, agent_in, ENV_NAME, seed=0):
agent = agent_in.duplicate()
if ENV_NAME == 'lunar':
env = gym.make('LunarLander-v2')
elif ENV_NAME == 'cart':
env = gym.make('CartPole-v1')
else:
raise Exception('No valid environment selected')
done = False
torch.manual_seed(seed)
env.seed(seed)
np.random.seed(seed)
env.action_space.seed(seed)
random.seed(seed)
state = env.reset() # Reset environment and record the starting state
while not done:
action = agent.get_action(state)
# Step through environment using chosen action
state, reward, done, _ = env.step(action)
# env.render()
# Save reward
agent.save_reward(reward)
if done:
break
reward_sum = np.sum(agent.replay_buffer.rewards_list)
rewards_list, advantage_list, deeper_advantage_list = discount_reward(agent.replay_buffer.rewards_list,
agent.replay_buffer.value_list,
agent.replay_buffer.deeper_value_list)
agent.replay_buffer.rewards_list = rewards_list
agent.replay_buffer.advantage_list = advantage_list
agent.replay_buffer.deeper_advantage_list = deeper_advantage_list
to_return = [reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())]
if q is not None:
try:
q.put(to_return)
except RuntimeError as e:
print(e)
return to_return
return to_return
def main(episodes, agent, ENV_NAME):
running_reward_array = []
for episode in range(episodes):
reward = 0
returned_object = run_episode(None, agent_in=agent, ENV_NAME=ENV_NAME)
reward += returned_object[0]
running_reward_array.append(returned_object[0])
agent.replay_buffer.extend(returned_object[1])
if reward >= 499:
agent.save('../models/'+str(episode)+'th')
agent.end_episode(reward)
running_reward = sum(running_reward_array[-100:]) / float(min(100.0, len(running_reward_array)))
if episode % 50 == 0:
print(f'Episode {episode} Last Reward: {reward} Average Reward: {running_reward}')
if episode % 500 == 0:
agent.save('../models/'+str(episode)+'th')
return running_reward_array
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--agent_type", help="architecture of agent to run", type=str, default='ddt')
parser.add_argument("-e", "--episodes", help="how many episodes", type=int, default=2000)
parser.add_argument("-l", "--num_leaves", help="number of leaves for DDT/DRL ", type=int, default=8)
parser.add_argument("-n", "--num_hidden", help="number of hidden layers for MLP ", type=int, default=0)
parser.add_argument("-env", "--env_type", help="environment to run on", type=str, default='cart')
parser.add_argument("-gpu", help="run on GPU?", action='store_true')
args = parser.parse_args()
AGENT_TYPE = args.agent_type # 'ddt', 'mlp'
NUM_EPS = args.episodes # num episodes Default 1000
ENV_TYPE = args.env_type # 'cart' or 'lunar' Default 'cart'
USE_GPU = args.gpu # Applies for 'prolo' only. use gpu? Default false
if ENV_TYPE == 'lunar':
init_env = gym.make('LunarLander-v2')
dim_in = init_env.observation_space.shape[0]
dim_out = init_env.action_space.n
elif ENV_TYPE == 'cart':
init_env = gym.make('CartPole-v1')
dim_in = init_env.observation_space.shape[0]
dim_out = init_env.action_space.n
else:
raise Exception('No valid environment selected')
print(f"Agent {AGENT_TYPE} on {ENV_TYPE} ")
# mp.set_start_method('spawn')
mp.set_sharing_strategy('file_system')
for i in range(5):
bot_name = AGENT_TYPE + ENV_TYPE
if USE_GPU:
bot_name += 'GPU'
if AGENT_TYPE == 'ddt':
policy_agent = DDTAgent(bot_name=bot_name,
input_dim=dim_in,
output_dim=dim_out,
rule_list=False,
num_rules=args.num_leaves)
elif AGENT_TYPE == 'mlp':
policy_agent = MLPAgent(input_dim=dim_in,
bot_name=bot_name,
output_dim=dim_out,
num_hidden=args.num_hidden)
else:
raise Exception('No valid network selected')
reward_array = main(NUM_EPS, policy_agent, ENV_TYPE)
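# --- Hedged sketch (not part of the original runner) ---
# Illustrates the standard discounted-return calculation that a helper like
# discount_reward typically performs; gamma and the reward list are made up.
def _discount(rewards, gamma=0.99):
    out, running = [], 0.0
    for r in reversed(rewards):
        running = r + gamma * running
        out.append(running)
    return list(reversed(out))

# _discount([1, 0, 1]) == [1 + 0.99 * 0.99, 0.99, 1.0]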
| 40.709677
| 112
| 0.621434
| 642
| 5,048
| 4.641745
| 0.280374
| 0.040268
| 0.051342
| 0.035235
| 0.215772
| 0.113423
| 0.095302
| 0.039597
| 0.039597
| 0.039597
| 0
| 0.012028
| 0.275357
| 5,048
| 123
| 113
| 41.04065
| 0.802624
| 0.059826
| 0
| 0.141509
| 0
| 0
| 0.113411
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0
| 0.09434
| 0
| 0.141509
| 0.028302
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a263ee52f1bcf865cb343ad7cbe07411cfb3a5e
| 1,534
|
py
|
Python
|
Week 08/tw10_words_by_prefix.py
|
andrewn488/OMSBA-5061
|
8e57fff45d8965b0423a6fe338bd74cedfe94ea0
|
[
"MIT"
] | null | null | null |
Week 08/tw10_words_by_prefix.py
|
andrewn488/OMSBA-5061
|
8e57fff45d8965b0423a6fe338bd74cedfe94ea0
|
[
"MIT"
] | null | null | null |
Week 08/tw10_words_by_prefix.py
|
andrewn488/OMSBA-5061
|
8e57fff45d8965b0423a6fe338bd74cedfe94ea0
|
[
"MIT"
] | 1
|
2022-02-07T02:42:43.000Z
|
2022-02-07T02:42:43.000Z
|
""" TW10: Words by Prefix
Team: Tam Tamura, Andrew Nalundasan
For: OMSBA 2061, Seattle University
Date: 11/3/2020
"""
def wordByPrefix(prefix_length, word):
my_dict = {}
for key in word:
for letter in word:
prefix_key = letter[:prefix_length]
letter = word[:prefix_length]
return prefix_key
return letter
question_2 = ['able', 'ability', 'apple', 'tryst', 'trial', 'tremendous', 'tree']
my_list = []
for elem in question_2:
prefix = elem[:2]
my_list.append(prefix)
print(my_list)
def question_3(prefix_length, word):
my_list = []
for key in word:
prefix = key[:prefix_length]
my_list.append(prefix)
return my_list
def wordByPrefix(prefix_length, word):
my_list = []
#count = 0
for key in word:
prefix = key[:prefix_length]
my_list.append(prefix)
count = {}
for letter in my_list:
if letter.isalpha():
if letter not in count:
count[letter] = 0
count[letter] += 1
return count
def wordByPrefix(prefix_length, word):
my_list = []
#count = 0
for key in word:
prefix = key[:prefix_length]
my_list.append(prefix)
count = {}
for letter in my_list:
if letter.isalpha():
            if letter not in count:
                count[letter] = 0
            count[letter] += 1
return count
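# --- Hedged sketch (not part of the submitted assignment) ---
# One straightforward way to group words by prefix, which appears to be the
# intent of the exercise above.
def group_by_prefix(prefix_length, words):
    groups = {}
    for w in words:
        groups.setdefault(w[:prefix_length], []).append(w)
    return groups

# group_by_prefix(2, ['able', 'ability', 'apple']) == {'ab': ['able', 'ability'], 'ap': ['apple']}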
| 24.741935
| 82
| 0.544329
| 181
| 1,534
| 4.464088
| 0.265193
| 0.089109
| 0.079208
| 0.089109
| 0.5
| 0.472772
| 0.431931
| 0.431931
| 0.431931
| 0.431931
| 0
| 0.021341
| 0.35854
| 1,534
| 61
| 83
| 25.147541
| 0.799797
| 0.083442
| 0
| 0.636364
| 0
| 0
| 0.02994
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.204545
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a26a5869fd7404e249d795b4a225c3eca2ac49a
| 2,683
|
py
|
Python
|
openff/bespokefit/executor/services/qcgenerator/cache.py
|
openforcefield/bespoke-f
|
27b072bd09610dc8209429118d739e1f453edd61
|
[
"MIT"
] | 12
|
2020-08-28T20:49:00.000Z
|
2021-11-17T08:50:32.000Z
|
openff/bespokefit/executor/services/qcgenerator/cache.py
|
openforcefield/bespoke-f
|
27b072bd09610dc8209429118d739e1f453edd61
|
[
"MIT"
] | 95
|
2020-02-19T18:40:54.000Z
|
2021-12-02T10:52:23.000Z
|
openff/bespokefit/executor/services/qcgenerator/cache.py
|
openforcefield/openff-bespokefit
|
85c92a51055a5a82e5d50fee1668a7de4ce2b1d4
|
[
"MIT"
] | 3
|
2021-04-01T04:22:49.000Z
|
2021-04-13T03:19:10.000Z
|
import hashlib
from typing import TypeVar, Union
import redis
from openff.toolkit.topology import Molecule
from openff.bespokefit.executor.services.qcgenerator import worker
from openff.bespokefit.schema.tasks import HessianTask, OptimizationTask, Torsion1DTask
from openff.bespokefit.utilities.molecule import canonical_order_atoms
_T = TypeVar("_T", HessianTask, OptimizationTask, Torsion1DTask)
def _canonicalize_task(task: _T) -> _T:
task = task.copy(deep=True)
# Ensure the SMILES has a canonical ordering to help ensure cache hits.
canonical_molecule = canonical_order_atoms(
Molecule.from_smiles(task.smiles, allow_undefined_stereo=True)
)
if isinstance(task, Torsion1DTask):
map_to_atom_index = {
j: i for i, j in canonical_molecule.properties["atom_map"].items()
}
central_atom_indices = sorted(
map_to_atom_index[task.central_bond[i]] for i in (0, 1)
)
canonical_molecule.properties["atom_map"] = {
atom_index: (i + 1) for i, atom_index in enumerate(central_atom_indices)
}
canonical_smiles = canonical_molecule.to_smiles(
isomeric=True, explicit_hydrogens=True, mapped=True
)
task.central_bond = (1, 2)
else:
canonical_smiles = canonical_molecule.to_smiles(
isomeric=True, explicit_hydrogens=True, mapped=False
)
task.smiles = canonical_smiles
return task
def cached_compute_task(
task: Union[HessianTask, OptimizationTask, Torsion1DTask],
redis_connection: redis.Redis,
) -> str:
"""Checks to see if a QC task has already been executed and if not send it to a
worker.
"""
if isinstance(task, Torsion1DTask):
compute = worker.compute_torsion_drive
elif isinstance(task, OptimizationTask):
compute = worker.compute_optimization
elif isinstance(task, HessianTask):
compute = worker.compute_hessian
else:
raise NotImplementedError()
# Canonicalize the task to improve the cache hit rate.
task = _canonicalize_task(task)
task_hash = hashlib.sha512(task.json().encode()).hexdigest()
task_id = redis_connection.hget("qcgenerator:task-ids", task_hash)
if task_id is not None:
return task_id.decode()
task_id = compute.delay(task_json=task.json()).id
redis_connection.hset("qcgenerator:types", task_id, task.type)
# Make sure to only set the hash after the type is set in case the connection
# goes down before this information is entered and subsequently discarded.
redis_connection.hset("qcgenerator:task-ids", task_hash, task_id)
return task_id
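# --- Hedged sketch (not part of openff-bespokefit) ---
# The cache-by-content-hash pattern used above, reduced to a plain dict in
# place of Redis; `compute_fn` and `task_json` are hypothetical stand-ins.
_TASK_ID_CACHE = {}

def cached_submit(task_json: str, compute_fn) -> str:
    task_hash = hashlib.sha512(task_json.encode()).hexdigest()
    if task_hash in _TASK_ID_CACHE:
        return _TASK_ID_CACHE[task_hash]
    task_id = compute_fn(task_json)  # submits the work and returns an id
    _TASK_ID_CACHE[task_hash] = task_id
    return task_id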
| 31.197674
| 87
| 0.706299
| 336
| 2,683
| 5.449405
| 0.369048
| 0.022938
| 0.032769
| 0.031677
| 0.15183
| 0.086292
| 0.086292
| 0.086292
| 0.086292
| 0.086292
| 0
| 0.006155
| 0.212821
| 2,683
| 85
| 88
| 31.564706
| 0.860795
| 0.13306
| 0
| 0.111111
| 0
| 0
| 0.032496
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.12963
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a277a87fbb9f9430d9ecdf658e9964b1157dc17
| 3,951
|
py
|
Python
|
advanced-workflows/task-graphs-lab/exercise/plugins/lab/plugin/workflows.py
|
jrzeszutek/cloudify-training-labs
|
5477750d269cb703ce47e35a1c13749fc88f3f6f
|
[
"Apache-2.0"
] | 6
|
2015-07-06T01:10:08.000Z
|
2016-12-21T15:42:07.000Z
|
advanced-workflows/task-graphs-lab/exercise/plugins/lab/plugin/workflows.py
|
jrzeszutek/cloudify-training-labs
|
5477750d269cb703ce47e35a1c13749fc88f3f6f
|
[
"Apache-2.0"
] | 4
|
2015-08-25T06:32:36.000Z
|
2016-09-07T07:01:34.000Z
|
advanced-workflows/task-graphs-lab/exercise/plugins/lab/plugin/workflows.py
|
jrzeszutek/cloudify-training-labs
|
5477750d269cb703ce47e35a1c13749fc88f3f6f
|
[
"Apache-2.0"
] | 14
|
2015-03-28T05:45:58.000Z
|
2017-02-14T02:22:09.000Z
|
'''Copyright Gigaspaces, 2017, All Rights Reserved'''
from cloudify.plugins import lifecycle
OP_START = 'hacker.interfaces.lifecycle.start'
OP_STOP = 'hacker.interfaces.lifecycle.stop'
OP_SS_C = 'hacker.interfaces.lifecycle.create_snapshots'
OP_SS_D = 'hacker.interfaces.lifecycle.delete_snapshots'
REQUIRED_OPS = set([OP_START, OP_SS_C, OP_SS_D, OP_STOP])
def build_instance_sequence(instance, operation,
state_start=None, state_end=None):
'''
Builds sequenced subgraph tasks for an instance
.. note::
The sequence will not be built if the instance provided
does not have a node with an operation defined in the
operation parameter.
:param `CloudifyWorkflowNodeInstance` instance:
Node instance to execute tasks against
:param str operation:
Node (lifecycle) operation to execute
:param str state_start:
Verb to describe operation start
    :param str state_end:
        Verb to describe operation finish
'''
tasks = list()
# Only build the sequence if the node operation exists
if operation not in instance.node.operations:
return tasks
# Add task starting state
if state_start:
tasks.append(instance.send_event('%s host' % state_start))
tasks.append(instance.set_state(state_start.lower()))
# Add task operation
tasks.append(instance.execute_operation(operation))
# Add task ended state
if state_end:
tasks.append(instance.send_event('%s host' % state_end))
tasks.append(instance.set_state(state_end.lower()))
return tasks
def build_instance_subgraph(instance, graph):
'''
Builds a subgraph for an instance
:param `CloudifyWorkflowNodeInstance` instance:
Node instance to execute tasks against
:param `TaskDependencyGraph` graph:
Task graph to create sequences from
'''
# Init a "stop instance" subgraph
sg_stop = graph.subgraph('stop_subgraph')
seq_stop = sg_stop.sequence()
seq_stop.add(*build_instance_sequence(
instance, OP_STOP, 'Stopping', 'Stopped'))
# Init a "recreate snapshots" subgraph
sg_snap = graph.subgraph('snapshot_subgraph')
seq_snap = sg_snap.sequence()
if OP_SS_D in instance.node.operations:
seq_snap.add(*build_instance_sequence(instance, OP_SS_D))
if OP_SS_C in instance.node.operations:
seq_snap.add(*build_instance_sequence(instance, OP_SS_C))
# Init a "start instance" subgraph
sg_start = graph.subgraph('stop_subgraph')
seq_start = sg_start.sequence()
seq_start.add(*build_instance_sequence(
instance, OP_START, 'Starting', 'Started'))
# Create subgraph dependencies
graph.add_dependency(sg_snap, sg_stop)
graph.add_dependency(sg_start, sg_snap)
def refresh_snapshots(ctx, **_):
'''
Executes a complex, graph-based set of lifecycle events
to stop all host (compute) instances, delete all
existing instance snapshots, take new snapshots
of all attached volumes, and start the instances
back up when complete.
'''
graph = ctx.graph_mode()
# Find all compute hosts and build a sequence graph
for node in ctx.nodes:
if not REQUIRED_OPS.issubset(node.operations):
ctx.logger.warn(
'Skipping refresh_snapshots workflow for node "%s" because '
'it does not have all required operations defined' % node.id)
continue
# Iterate over each node instance
for instance in node.instances:
if not lifecycle.is_host_node(instance):
ctx.logger.warn(
'Skipping refresh_snapshots workflow for node instance '
'"%s" because it is not a compute host' % instance.id)
continue
build_instance_subgraph(instance, graph)
# Execute the sequences
return graph.execute()
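# --- Hedged sketch (not part of the Cloudify plugin) ---
# The stop -> snapshot -> start ordering that build_instance_subgraph encodes
# through subgraph dependencies, expressed with plain callables instead of
# workflow tasks.
def _refresh_order(stop, delete_snapshots, create_snapshots, start):
    stop()                  # the stop subgraph must finish first
    delete_snapshots()      # the snapshot subgraph depends on the stop subgraph
    create_snapshots()
    start()                 # the start subgraph depends on the snapshot subgraph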
| 37.628571
| 77
| 0.679069
| 497
| 3,951
| 5.229376
| 0.259557
| 0.012312
| 0.0404
| 0.055791
| 0.290112
| 0.232397
| 0.181608
| 0.181608
| 0.152366
| 0.112351
| 0
| 0.001336
| 0.241964
| 3,951
| 104
| 78
| 37.990385
| 0.866444
| 0.332068
| 0
| 0.115385
| 0
| 0
| 0.176637
| 0.061843
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.019231
| 0
| 0.134615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a28f4c1d95b682b9a50e90e2f39fe8345b14eab
| 33,404
|
py
|
Python
|
File Transfer/Flyter/flyter.py
|
CryptoNyxz/Miscellaneous-Tools
|
797ea04d7c369469ab3d2a1ae2838c4a7b7b9c02
|
[
"MIT"
] | null | null | null |
File Transfer/Flyter/flyter.py
|
CryptoNyxz/Miscellaneous-Tools
|
797ea04d7c369469ab3d2a1ae2838c4a7b7b9c02
|
[
"MIT"
] | null | null | null |
File Transfer/Flyter/flyter.py
|
CryptoNyxz/Miscellaneous-Tools
|
797ea04d7c369469ab3d2a1ae2838c4a7b7b9c02
|
[
"MIT"
] | null | null | null |
"""
Flyter
Tool for transferring files on the same network using raw sockets.
Doesn't use encryption.
"""
__version__ = (0, 0, 0)
__author__ = "CryptoNyxz"
__license__ = """
MIT License
Copyright (c) 2021 Jaymund Cyrus F. Floranza
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from argparse import ArgumentParser
from base64 import b64encode
from datetime import timedelta
from math import log
from os import altsep, sep, \
mkdir, stat, unlink
from os.path import dirname, exists, join
from random import randint
from secrets import token_bytes
from shutil import get_terminal_size
from socket import \
socket, error, timeout, \
ntohs, ntohl, htons, htonl, \
gethostname, \
AF_INET, SOCK_STREAM
from threading import Thread
from time import time
from warnings import warn
from sys import argv, exit, version_info
if version_info < (3, 6):
    warn('[!] Some features may not be compatible with the version of your '
         'python interpreter')
FROMTERMINAL = False
# Utility Functions
def random_port(host):
"""Return a random available TCP port."""
while True:
        port = randint(10_000, 65535)  # 65535 is the highest valid TCP port
with socket(AF_INET, SOCK_STREAM) as sock:
try:
sock.bind((host, port))
except error:
continue
else:
return port
def printerror(errormsg):
"""Print an error message."""
global FROMTERMINAL
if FROMTERMINAL:
print(f'\n[x] {errormsg}')
        exit(-1)
else:
warn(errormsg)
def printalert(alert):
"""Print an alert message."""
global FROMTERMINAL
print(f'[!] {alert}')
def int_to_bytes_s(integer):
    """Convert a 16-bit integer to bytes for packing."""
res = ntohs(integer)
res = hex(res)[2:]
res = '0'*(len(res) % 2) + res
return bytes.fromhex(res)
def bytes_to_int_s(byteseq):
    """Convert a byte sequence to a 16-bit integer for unpacking."""
res = bytes.hex(byteseq)
res = int(res, 16)
return htons(res)
def int_to_bytes_l(integer):
    """Convert a 32-bit integer to bytes for packing."""
res = ntohl(integer)
res = hex(res)[2:]
res = '0'*(len(res) % 2) + res
return bytes.fromhex(res)
def bytes_to_int_l(byteseq):
    """Convert a byte sequence to a 32-bit integer for unpacking."""
res = bytes.hex(byteseq)
res = int(res, 16)
return htonl(res)
def pack_str(string):
"""Pack a string into a byte sequence."""
return string.encode()
def unpack_str(byteseq):
"""Unpack a byte sequence into a string."""
return byteseq.decode()
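# --- Hedged usage sketch (not part of the original tool) ---
# Round-trip check for the packing helpers above; the sample values are
# arbitrary.
def _pack_roundtrip_demo():
    assert bytes_to_int_s(int_to_bytes_s(5061)) == 5061
    assert bytes_to_int_l(int_to_bytes_l(70_000)) == 70_000
    assert unpack_str(pack_str('flyter')) == 'flyter'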
# Utility Classes
class ProgressBar:
"""
For displaying progress bars.
Parameters
----------
max_value : int, float
The upper limit of the progress bar.
length : :obj:`int`, optional
The length of the progress bar.
"""
@staticmethod
def byte_rescale(data, precision=1):
scale = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
p = int(log(data, 2)/10) if data else 0
r_bytes = round(data/pow(2, 10*p), precision)
return f"{r_bytes}{scale[p]}"
def __init__(self, max_value, length=50):
self.max_value = max_value
self.current_val = 0
self.length = length
self.rate = None
self.start_time = None
self.start_value = None
self.stopped = False
@property
def done(self):
"""Return if already finished."""
return self.current_val >= self.max_value or self.stopped
def start(self):
"""Start the progress bar."""
self.stopped = False
self.start_time = time()
self.start_value = self.current_val
def stop(self):
"""Stop the progress bar."""
self.stopped = True
def add_progress(self, value):
"""
Count new progress.
Parameter
---------
value : int, float
Added progress value.
"""
if self.stopped:
return
self.current_val += value
def display(self):
"""Display the current progress."""
if self.stopped:
return
d_value = self.current_val - self.start_value
d_max_value = self.max_value - self.start_value
d_time = time() - self.start_time
per = d_value/d_max_value
prog = int(self.length*per)
extra = self.length*round(per) > prog
prog_bar = '█'*prog + '▌'*extra
spaces = ' '*(self.length - (prog + extra))
rate = d_value/d_time if d_time else float('inf')
eta_s = round((d_max_value - d_value)/rate) if rate else \
None
eta = timedelta(seconds=eta_s) if eta_s is not None else '?'
clear_line = " "*(get_terminal_size().columns - 1)
print(f"{clear_line}\r"
"Progress: "
f"|{prog_bar}{spaces}| "
f"{100*per:.1f}% "
f"({ProgressBar.byte_rescale(d_value)}) "
f"[{ProgressBar.byte_rescale(rate)}/s] "
f"ETA: {eta}", end="\r")
# Flyter Classes
class FlyterSender:
"""
Handles Flyter file sending processes.
Note: Sends to FlyterReceiver instances.
    Parameters
----------
recver_ip : str
The IP address of the receiver.
main_port : int
The main TCP port of the receiver.
"""
DEFAULT_PACKET_SIZE = 1024
def __init__(self, recver_ip, main_port):
self.recver_ip = recver_ip
self.main_port = main_port
self.token = token_bytes(6)
self._recver_hostname = None
self._recver_token = None
self._transfer_type = None
self._worker_ports = None
self._packet_size = FlyterSender.DEFAULT_PACKET_SIZE
self._sending_file = False
self._workers_active = 0
self._progress_bar = None
try:
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.settimeout(60)
except:
printerror('Error initializing sockets')
self.param_set = False
def __del__(self):
if isinstance(self.socket, socket):
self.socket.close()
def _send_s(self, filepath, file_size):
"""
Send a file with a single worker.
Parameters
----------
filepath : str
The filepath to the file to be sent.
"""
if not self.param_set:
return printerror("Not yet set with receiver's parameters")
if not exists(filepath):
return printerror("File doesn't exist")
self._sending_file = True
try:
fs = file_size
with open(filepath, 'br') as f:
while self._sending_file and fs:
packet = f.read(self._packet_size)
if not packet:
break
self.socket.send(packet)
assert self.socket.recv(1) == b'\x06' # ACK
self._progress_bar.add_progress(len(packet))
fs -= len(packet)
except AssertionError:
self._progress_bar.stop()
return printerror("Receiver rejected packet")
except FileNotFoundError:
self._progress_bar.stop()
return printerror("Couldn't access file")
except PermissionError:
self._progress_bar.stop()
return printerror("Couldn't access file due to permission error")
except timeout:
self._progress_bar.stop()
return printerror("Operation timed out")
except:
self._progress_bar.stop()
return printerror(f"Error while sending file")
else:
self._sending_file = False
return True
def _send_m(self, filepath, file_sizes):
"""
Send a file with multiple workers.
Speeds up transmission rate by using multiple workers.
Parameters
----------
filepath : str
The filepath to the file to be sent.
file_sizes : list(int)
The sizes of the split-up file to be sent.
"""
if not self.param_set:
return printerror("Not yet set with receiver's parameters")
if not exists(filepath):
printerror("File doesn't exist")
def threadfunc(worker_num, fpath, start, end):
self._workers_active += 1
try:
with socket(AF_INET, SOCK_STREAM) as sock:
sock.connect(
(self.recver_ip, self._worker_ports[worker_num])
)
sock.send(self.token)
assert sock.recv(1) == b'\x06' # ACK
fs = end - start
with open(fpath, 'br') as f:
f.seek(start)
while self._sending_file and fs:
end_size = f.tell() + self._packet_size
size = (self._packet_size - max(0, end_size - end))
packet = f.read(size)
if not packet:
break
sock.send(packet)
assert sock.recv(1) == b'\x06' # ACK
self._progress_bar.add_progress(len(packet))
fs -= len(packet)
except KeyboardInterrupt:
self._progress_bar.stop()
self._sending_file = False
return printerror("User aborted operation")
except AssertionError:
self._progress_bar.stop()
self._sending_file = False
return printerror(f"Receiver rejected packet")
except FileNotFoundError:
self._progress_bar.stop()
self._sending_file = False
return printerror("Couldn't access file")
except PermissionError:
self._progress_bar.stop()
self._sending_file = False
return printerror("Couldn't access file due to permission "
"error")
except timeout:
self._progress_bar.stop()
self._sending_file = False
return printerror("Operation timed out")
except:
self._progress_bar.stop()
self._sending_file = False
return printerror(f"Error while sending file")
finally:
self._workers_active -= 1
num_workers = len(self._worker_ports)
self._sending_file = True
try:
size = 0
for w in range(num_workers):
Thread(
target=threadfunc,
args=(
w, filepath,
size, size + file_sizes[w]
),
).start()
size += file_sizes[w]
except FileNotFoundError:
return printerror("Couldn't access file")
except PermissionError:
return printerror("Couldn't access file due to permission error")
except:
return printerror("Error while starting to send file")
while self._workers_active:
try:
pass
except KeyboardInterrupt:
self._progress_bar.stop()
self._sending_file = False
return printerror("User aborted operation")
self._sending_file = False
return True
def send_file(self, filepath):
"""
Send a file.
Parameters
----------
filepath : str
The filepath of the file to be sent.
"""
if not self.param_set:
return printerror("Not yet set with receiver's parameters")
if not exists(filepath):
return printerror("File doesn't exist")
# Headers
try:
tok = self.token
num_w = max(1, len(self._worker_ports))
fpath = filepath.replace(altsep, sep)
fname = fpath.split(sep)[-1]
fsize = stat(fpath).st_size
fsizes = [fsize//num_w for w in range(num_w)]
fsizes[-1] += fsize - sum(fsizes)
fn = pack_str(fname)
len_fn = int_to_bytes_s(len(fn))
fs = [int_to_bytes_l(s) for s in fsizes]
fs = b''.join(fs)
len_fs = int_to_bytes_s(num_w)
headers = b''.join([tok, len_fn, fn, len_fs, fs])
except:
return printerror("Error while preparing headers")
try:
b64_tok = b64encode(self._recver_token).decode()
printalert(f"Sending to {self._recver_hostname}-{b64_tok}:"
f" [ {fname} ]")
self.socket.send(headers)
print("Waiting for receiver to accept file")
assert self.socket.recv(1) == b'\x06' # ACK
except KeyboardInterrupt:
return printerror("User aborted operation")
except AssertionError:
return printerror("Receiver rejected")
except timeout:
return printerror("Operation timed out")
except Exception:
return printerror("Error while sending headers to receiver")
print(f"[ {gethostname()}-{b64encode(self.token).decode()} ] "
f"is now sending file ({ProgressBar.byte_rescale(fsize)})")
# Progress bar thread
self._progress_bar = ProgressBar(fsize, 40)
self._progress_bar.start()
def progress_thread():
try:
# Wait until sending file
while not self._sending_file:
pass
# Display until file is sent
while not self._progress_bar.done:
self._progress_bar.display()
except:
return printerror("Error with progress thread")
Thread(target=progress_thread).start()
# Start sending
res = None
try:
if self._transfer_type == 'S':
res = self._send_s(fpath, fsize)
elif self._transfer_type == 'M':
res = self._send_m(fpath, fsizes)
assert self.socket.recv(1) == b'\x06' # ACK
except:
self._progress_bar.stop()
self._sending_file = False
return printerror(f"Sending file was unsuccessful")
else:
# Wait for progress bar
while not self._progress_bar.done:
pass
self._progress_bar.display()
print(f"\nSuccessfully sent: {fname}")
return res
def recv_param_set(self):
"""
Receive and unpack Receiver's parameter settings.
Used to set Sender's parameter settings used during data
transmissions.
"""
try:
self.socket.connect((self.recver_ip, self.main_port))
except error:
return printerror("Can't connect to "
f"{self.recver_ip}:{self.main_port}")
try:
sender_hn = pack_str(gethostname())
len_sender_hn = int_to_bytes_s(len(sender_hn))
self.socket.send(b''.join([len_sender_hn, sender_hn]))
assert self.socket.recv(1) == b'\x06' # ACK
except AssertionError:
return printerror("Receiver rejected handshake")
except timeout:
return printerror('Operation timed out')
except:
return printerror("Error during handshake")
try:
len_hn = bytes_to_int_s(self.socket.recv(2))
self._recver_hostname = unpack_str(self.socket.recv(len_hn))
self._recver_token = self.socket.recv(6)
self._transfer_type = unpack_str(self.socket.recv(1))
len_wp = bytes_to_int_s(self.socket.recv(2))
self._worker_ports = [bytes_to_int_s(self.socket.recv(2))
for w in range(len_wp)]
self.socket.send(b'\x06') # ACK
except error:
return printerror("Error getting connected with socket")
except:
self.socket.send(b'\x15') # NAK
return printerror("Error getting parameters from receiver")
else:
self.param_set = True
class FlyterReciever:
"""
Handles Flyter file receiving processes.
Note: Receives from FlyterSender instances.
Parameters
----------
host_ip : str
The Host IP address to be used.
main_port : int
The main TCP port to be used.
num_workers : int
        The number of workers to be used during transmission.
"""
@staticmethod
def storage_dir(hostname=None):
"""
Return the path of the storage dir for received files.
        If the storage directory doesn't exist, it is created first.
Parameters
----------
hostname : str
The name of the subdirectory where that
host's sent files are stored.
"""
app_dirname = dirname(__file__)
appfiles_dirname = join(app_dirname, 'Flyter')
if not exists(appfiles_dirname):
mkdir(appfiles_dirname)
storage_dirname = join(appfiles_dirname, 'Received Files')
if not exists(storage_dirname):
mkdir(storage_dirname)
if hostname:
host_storage_dirname = join(storage_dirname, hostname)
if not exists(host_storage_dirname):
mkdir(host_storage_dirname)
return host_storage_dirname
else:
return storage_dirname
DEFAULT_PACKET_SIZE = 512
def __init__(self, host_ip, main_port, num_workers):
self.host_ip = host_ip
self.main_port = main_port
self.token = token_bytes(6)
self.transfer_type = 'S' if num_workers == 1 else 'M'
self.worker_ports = [
random_port(self.host_ip) for w in range(num_workers)
] if num_workers > 1 else []
self._sender_socket = None
self._sender_hostname = None
self._sender_token = None
self._sender_filename = None
self._sender_filesizes = None
        self._packet_size = FlyterReciever.DEFAULT_PACKET_SIZE
self._recving_file = False
self._workers_active = 0
self._progress_bar = ProgressBar(None)
try:
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.bind((self.host_ip, self.main_port))
self.socket.settimeout(60)
self.workers = [
socket(AF_INET, SOCK_STREAM) for w in range(num_workers)
] if num_workers > 1 else []
if self.workers:
for w in range(num_workers):
self.workers[w].bind((self.host_ip, self.worker_ports[w]))
self.workers[w].settimeout(60)
except:
printerror('Error initializing sockets')
self.param_set = False
def __del__(self):
if isinstance(self.__dict__.get('socket'), socket):
self.socket.close()
if self.__dict__.get('workers'):
for w in self.workers:
w.close()
def _recv_s(self):
"""Receive a file with a single worker."""
if not self.param_set:
return printerror("Sender not yet set with parameters")
try:
self._recving_file = True
path = join(
FlyterReciever.storage_dir(self._sender_hostname),
self._sender_filename
)
fs = self._sender_filesizes[0]
with open(path, 'bw') as f:
while self._recving_file and fs:
packet = self._sender_socket.recv(self._packet_size)
f.write(packet)
self._progress_bar.add_progress(len(packet))
fs -= len(packet)
self._sender_socket.send(b'\x06') # ACK
except timeout:
self._progress_bar.stop()
return printerror("Operation timed out")
except FileNotFoundError:
self._progress_bar.stop()
return printerror("Downloading file has been deleted")
except PermissionError:
self._progress_bar.stop()
return printerror("Couldn't access storage directory")
except error:
self._progress_bar.stop()
return printerror("Error with socket")
except:
self._progress_bar.stop()
return printerror("Error receiving file")
else:
self._recving_file = False
return True
def _recv_m(self):
"""
Receive a file with multiple workers.
Speeds up transmission rate by using multiple workers.
"""
if not self.param_set:
return printerror("Sender not yet set with parameters")
def threadfunc(worker_num, fpath):
self._workers_active += 1
try:
recver_socket = self.workers[worker_num]
recver_socket.listen(1)
sender_socket, hostaddr = recver_socket.accept()
send_tok = sender_socket.recv(6)
if send_tok == self._sender_token:
sender_socket.send(b'\x06') # ACK
else:
sender_socket.send(b'\x15') # NAK
fs = self._sender_filesizes[worker_num]
with open(fpath, 'bw') as f:
while self._recving_file and f.writable() and fs:
packet = sender_socket.recv(self._packet_size)
f.write(packet)
self._progress_bar.add_progress(len(packet))
fs -= len(packet)
sender_socket.send(b'\x06') # ACK
except KeyboardInterrupt:
self._progress_bar.stop()
self._recving_file = False
return printerror("User aborted operation")
except timeout:
self._progress_bar.stop()
self._recving_file = False
return printerror("Operation timed out")
except error:
self._progress_bar.stop()
self._recving_file = False
return printerror("Error with sockets")
except:
self._progress_bar.stop()
self._recving_file = False
return printerror("Error while receiving file")
finally:
self._workers_active -= 1
num_workers = len(self.workers)
self._recving_file = True
try:
for w in range(len(self.worker_ports)):
wpath = join(
FlyterReciever.storage_dir(self._sender_hostname),
f"{w}_{self._sender_filename}"
)
Thread(
target=threadfunc,
args=(w, wpath),
).start()
except FileNotFoundError:
return printerror("Couldn't access file")
except PermissionError:
return printerror("Couldn't access file due to permission error")
while self._workers_active:
try:
pass
except KeyboardInterrupt:
self._progress_bar.stop()
self._recving_file = False
printerror("User aborted operation")
self._recving_file = False
try:
# Build the file
path = join(
FlyterReciever.storage_dir(self._sender_hostname),
self._sender_filename
)
with open(path, 'bw') as output:
for w in range(num_workers):
wpath = join(
FlyterReciever.storage_dir(self._sender_hostname),
f"{w}_{self._sender_filename}"
)
with open(wpath, 'br') as temp:
packet = True
while packet:
packet = temp.read(self._packet_size)
output.write(packet)
# Clear the contents of the temp file
open(wpath, 'bw').close()
# Delete the temp files
for w in range(num_workers):
wpath = join(
FlyterReciever.storage_dir(self._sender_hostname),
f"{w}_{self._sender_filename}"
)
unlink(wpath)
except PermissionError:
self._sender_socket.send(b'\x15') # NAK
return printerror("Couldn't save file due to permissions")
except error:
return printerror("Error with sockets")
except:
self._sender_socket.send(b'\x15') # NAK
return printerror("Error while saving file")
else:
return True
def recv_file(self):
"""Receive a file."""
if not self.param_set:
return printerror("Not yet set with receiver's parameters")
# Headers
try:
tok = self._sender_socket.recv(6)
b64_tok = b64encode(tok).decode()
len_fn = bytes_to_int_s(self._sender_socket.recv(2))
fn = unpack_str(self._sender_socket.recv(len_fn))
len_fs = bytes_to_int_s(self._sender_socket.recv(2))
fs = [bytes_to_int_l(self._sender_socket.recv(4))
for s in range(len_fs)]
fs_all = sum(fs)
answer = input(f"{self._sender_hostname}-{b64_tok}"
f" wants to send: {fn} "
f"({ProgressBar.byte_rescale(fs_all)}). "
"Accept? (y/n) ")
if answer.lower() == 'y':
self._sender_socket.send(b'\x06') # ACK
else:
                self._sender_socket.send(b'\x15') # NAK
return printalert("Rejected file transfer")
except error:
return printerror("Sender isn't available anymore")
except:
self._sender_socket.send(b'\x15') # NAK
return printerror("Error while receiving headers")
print(f"[ {gethostname()}-{b64encode(self.token).decode()} ] "
f"is now receiving file ({ProgressBar.byte_rescale(fs_all)})")
# Progress bar thread
self._progress_bar = ProgressBar(fs_all, 35)
self._progress_bar.start()
def progress_thread():
try:
# Wait until receiving file
while not self._recving_file:
pass
# Display until file is received
while not self._progress_bar.done:
self._progress_bar.display()
except:
return printerror("Error with progress thread")
Thread(target=progress_thread).start()
self._sender_token = tok
self._sender_filename = fn
self._sender_filesizes = fs
# Start receiving
try:
if self.transfer_type == 'S':
res = self._recv_s()
elif self.transfer_type == 'M':
res = self._recv_m()
else:
res = None
except:
self._progress_bar.stop()
self._recving_file = False
return printerror("Receiving file was unsuccessful")
else:
self._sender_socket.send(b'\x06') # ACK
# Wait for progress bar
while not self._progress_bar.done:
pass
self._progress_bar.display()
print(f"\nSuccessfully received: {self._sender_filename}")
return res
def send_param_set(self):
"""
Pack and send Receiver's parameter settings.
Used to set Sender's parameter settings used during
data transmissions.
"""
try:
printalert("Waiting for sender")
self.socket.listen(1)
self._sender_socket, addrport = self.socket.accept()
except timeout:
return printerror("No sender available")
except:
return printerror("Error while waiting for sender")
try:
len_sender_hn = bytes_to_int_s(self._sender_socket.recv(2))
sender_hn = self._sender_socket.recv(len_sender_hn)
self._sender_hostname = unpack_str(sender_hn)
self._sender_socket.send(b'\x06') # ACK
except timeout:
return printerror("Operation timed out")
except:
return printerror("Error during handshake")
try:
hn = pack_str(gethostname())
len_hn = int_to_bytes_s(len(hn))
tok = self.token
tr_type = pack_str(self.transfer_type)
len_wp = int_to_bytes_s(len(self.worker_ports))
wp = [int_to_bytes_s(port)
for port in self.worker_ports]
wp = b''.join(wp)
headers = b''.join([len_hn, hn, tok, tr_type, len_wp, wp])
except:
return printerror("Error building headers")
try:
self._sender_socket.send(headers)
assert self._sender_socket.recv(1) == b'\x06' # ACK
except:
return printerror("Error while sending headers to sender")
else:
self.param_set = True
# Simplified Functions
def send(ip_address, port, filepath):
"""
Send file to receiver on the same network.
Parameters
----------
ip_address : str
The target receiver's IP address.
port : int
The target receiver's main TCP port.
filepath : str
The path to the file to be sent.
"""
sender = FlyterSender(ip_address, port)
sender.recv_param_set()
return sender.send_file(filepath)
def receive(host_ip_address, port, workers=1):
"""
Receive a file from sender on the same network.
Parameters
----------
host_ip_address : str
The receiver's host IP address.
port : int
The receiver's host port to listen on.
workers : :obj:`int`, optional
The number of workers to use.
"""
receiver = FlyterReciever(host_ip_address, port, workers)
receiver.send_param_set()
receiver.recv_file()
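# Hedged usage sketch for the simplified helpers above: one process calls
# receive() and blocks until a transfer completes, while another process on
# the same network calls send() against the receiver's address. The IPs, port
# and filename below are placeholders, not values taken from this module.
def _example_receiver():
    receive("0.0.0.0", 5001, workers=1)
def _example_sender():
    return send("192.168.1.20", 5001, "example.bin")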
if __name__ == '__main__':
parser = ArgumentParser(
prog="Flyter",
epilog="See '<command> --help' to read about a specific sub-command."
)
subparsers = parser.add_subparsers(
dest="action",
help="The action to be performed"
)
send_parser = subparsers.add_parser("send")
recv_parser = subparsers.add_parser("recv")
send_parser.add_argument('-i', '--ip',
required=True,
help="Target receiver's IP address")
send_parser.add_argument('-p', '--port',
type=int,
required=True,
help="Target receiver's TCP port number")
send_parser.add_argument('-f', '--file',
required=True,
help="Path to the file to be sent")
recv_parser.add_argument('-i', '--ip',
required=True,
help="Host IP address")
recv_parser.add_argument('-p', '--port',
type=int,
required=True,
help="TCP port to listen on")
recv_parser.add_argument('-w', '--workers',
type=int,
default=1,
help="TCP port to listen on")
if len(argv) > 1:
FROMTERMINAL = True
args = parser.parse_args()
if args.action == "send":
send(args.ip, args.port, args.file)
elif args.action == "recv":
receive(args.ip, args.port, args.workers)
else:
parser.print_help()
| 31.542965
| 79
| 0.55176
| 3,738
| 33,404
| 4.742643
| 0.129481
| 0.054152
| 0.035537
| 0.025722
| 0.514722
| 0.435921
| 0.401907
| 0.358416
| 0.326658
| 0.291403
| 0
| 0.008122
| 0.3587
| 33,404
| 1,058
| 80
| 31.572779
| 0.819345
| 0.099659
| 0
| 0.480337
| 0
| 0
| 0.14317
| 0.016439
| 0
| 0
| 0
| 0
| 0.015449
| 1
| 0.049157
| false
| 0.008427
| 0.019663
| 0
| 0.189607
| 0.109551
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a29f1417c7897dd77e238158ebcffa7aedd19a4
| 14,751
|
py
|
Python
|
tests/test_modeling_tf_led.py
|
patelrajnath/transformers
|
98afe9d7c94a840d4b30c7eb76f9bfe570d2ed50
|
[
"Apache-2.0"
] | null | null | null |
tests/test_modeling_tf_led.py
|
patelrajnath/transformers
|
98afe9d7c94a840d4b30c7eb76f9bfe570d2ed50
|
[
"Apache-2.0"
] | null | null | null |
tests/test_modeling_tf_led.py
|
patelrajnath/transformers
|
98afe9d7c94a840d4b30c7eb76f9bfe570d2ed50
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
config_cls = LEDConfig
config_updates = {}
hidden_act = "gelu"
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
attention_window=4,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.attention_window = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
self.key_length = self.attention_window + 1
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
self.encoder_seq_length = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def prepare_config_and_inputs_for_common(self):
input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
input_ids = tf.concat([input_ids, eos_tensor], axis=1)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.config_cls(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_ids=[2],
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.pad_token_id,
attention_window=self.attention_window,
**self.config_updates,
)
inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
global_attention_mask = tf.concat(
[tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
axis=-1,
)
inputs_dict["global_attention_mask"] = global_attention_mask
return config, inputs_dict
def check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = TFLEDModel(config=config).get_decoder()
input_ids = inputs_dict["input_ids"]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["attention_mask"][:1, :]
self.batch_size = 1
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
past_key_values = past_key_values[1]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
# append to next input_ids and
next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
):
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
if decoder_attention_mask is None:
decoder_attention_mask = tf.cast(tf.math.not_equal(decoder_input_ids, config.pad_token_id), tf.int8)
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
}
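# Hedged illustration of prepare_led_inputs_dict's default masking: positions
# equal to pad_token_id get mask 0, everything else gets 1. SimpleNamespace is
# a stand-in config used only for this sketch, not part of transformers.
def _prepare_led_inputs_demo():
    from types import SimpleNamespace
    cfg = SimpleNamespace(pad_token_id=1)
    ids = tf.constant([[0, 5, 5, 1]])
    batch = prepare_led_inputs_dict(cfg, ids, ids)
    return batch["attention_mask"]  # -> [[1, 1, 1, 0]] as int8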
@require_tf
class TFLEDModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
is_encoder_decoder = True
test_pruning = False
def setUp(self):
self.model_tester = TFLEDModelTester(self)
self.config_tester = ConfigTester(self, config_class=LEDConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
x = model.get_output_layer_with_bias()
assert x is None
name = model.get_prefix_bias_name()
assert name is None
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
num_global_attn_indices = 2
inputs_dict["global_attention_mask"] = tf.where(
tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
1,
inputs_dict["global_attention_mask"],
)
config.return_dict = True
seq_length = self.model_tester.seq_length
encoder_seq_length = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(outputs):
decoder_attentions = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length, seq_length],
)
def check_encoder_attentions_output(outputs):
attentions = [t.numpy() for t in outputs.encoder_attentions]
global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, seq_length],
)
self.assertListEqual(
list(global_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["use_cache"] = False
config.output_hidden_states = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
out_len = len(outputs)
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
if self.is_encoder_decoder:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_decoder_attentions_output(outputs)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self.assertEqual(model.config.output_hidden_states, True)
check_encoder_attentions_output(outputs)
@slow
def test_saved_model_with_attentions_output(self):
# longformer has special attentions which are not
# compatible in graph mode
pass
@slow
def test_saved_model_with_hidden_states_output(self):
# TODO(JPLU, PVP) this test should pass!!! PVP:
# IMO there is a problem with the signature check.
# Test passes for TFLEDModel, but not for TFLEDForConditionalGeneration
# IMO the reason is that the tensor variable name cannot be changed
        # from decoder_input_ids -> input_ids, which poses a BIG restriction
pass
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if tf.debugging.assert_near(a, b, atol=atol):
return True
raise
except Exception:
msg = "{} != {}".format(a, b)
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
def test_inference_no_head(self):
model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
# change to intended input here
input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
output = model(**inputs_dict)[0]
expected_shape = (1, 1024, 768)
self.assertEqual(output.shape, expected_shape)
# change to expected output here
expected_slice = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
)
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE)
def test_inference_with_head(self):
model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
# change to intended input here
input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
output = model(**inputs_dict)[0]
expected_shape = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape, expected_shape)
# change to expected output here
expected_slice = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
)
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE)
| 42.266476
| 119
| 0.686055
| 1,905
| 14,751
| 4.974803
| 0.186352
| 0.030389
| 0.022159
| 0.011396
| 0.45278
| 0.364778
| 0.272871
| 0.253772
| 0.221062
| 0.216208
| 0
| 0.027379
| 0.229951
| 14,751
| 348
| 120
| 42.387931
| 0.806937
| 0.137075
| 0
| 0.198413
| 0
| 0
| 0.023727
| 0.011824
| 0
| 0
| 0
| 0.002874
| 0.09127
| 1
| 0.06746
| false
| 0.007937
| 0.027778
| 0.003968
| 0.154762
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a2a4e7e62506f1bbd8360775e618cece1d71944
| 5,239
|
py
|
Python
|
src/wann_genetic/individual/numpy/ffnn.py
|
plonerma/wann-genetic
|
c4a8a1db81665b2549994d615e1d347dbe00226a
|
[
"MIT"
] | null | null | null |
src/wann_genetic/individual/numpy/ffnn.py
|
plonerma/wann-genetic
|
c4a8a1db81665b2549994d615e1d347dbe00226a
|
[
"MIT"
] | null | null | null |
src/wann_genetic/individual/numpy/ffnn.py
|
plonerma/wann-genetic
|
c4a8a1db81665b2549994d615e1d347dbe00226a
|
[
"MIT"
] | null | null | null |
import numpy as np
import sklearn
import logging
from wann_genetic.individual.network_base import BaseFFNN
def softmax(x, axis=-1):
"""Compute softmax values for each sets of scores in x.
Returns:
softmax - softmax normalized in dim axis
"""
e_x = np.exp(x - np.expand_dims(np.max(x,axis=axis), axis=axis))
s = (e_x / np.expand_dims(e_x.sum(axis=-1), axis=axis))
return s
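# Minimal sanity sketch (not part of the original module): every row of the
# softmax output should sum to 1 along the reduced axis.
def _softmax_sanity_check():
    probs = softmax(np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]]))
    assert np.allclose(probs.sum(axis=-1), 1.0)
    return probs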
def apply_act_function(available_funcs, selected_funcs, x=None):
"""Apply the activation function of the selected nodes to their sums.
    This fulfils the same function as the
:class:`wann_genetic.individual.torch.ffn.MultiActivationModule`.
"""
if x is not None:
result = np.empty(x.shape)
for i, func in enumerate(selected_funcs):
assert func < len(available_funcs)
result[..., i] = available_funcs[func][1](x[..., i])
return result
else:
return np.array([ # return function names
available_funcs[func][0] for func in selected_funcs
])
class Network(BaseFFNN):
"""Numpy implmentation of a Feed Forward Neural Network
For an explanation of how propagation works, see :doc:`numpy_network`.
"""
# Definition of the activations functions
available_act_functions = [
('relu', lambda x: np.maximum(0, x)),
('sigmoid', lambda x: (np.tanh(x/2.0) + 1.0)/2.0),
('tanh', lambda x: np.tanh(x)),
('gaussian (standard)', lambda x: np.exp(-np.multiply(x, x) / 2.0)),
('step', lambda x: 1.0*(x>0.0)),
('identity', lambda x: x),
('inverse', lambda x: -x),
('squared', lambda x: x**2), # unstable if applied multiple times
('abs', lambda x: np.abs(x)),
('cos', lambda x: np.cos(np.pi*x)),
('sin ', lambda x: np.sin(np.pi*x)),
]
enabled_act_functions = available_act_functions
def get_measurements(self, weights, x, y_true=None, measures=['predictions']):
assert len(x.shape) == 2 # multiple one dimensional input arrays
assert isinstance(weights, np.ndarray)
# initial activations
act_vec = np.empty((weights.shape[0], x.shape[0], self.n_nodes), dtype=float)
act_vec[..., :self.n_in] = x[...]
act_vec[..., self.n_in] = 1 # bias
# propagate signal through all layers
for active_nodes in self.layers():
act_vec[..., active_nodes] = self.calc_act(act_vec, active_nodes, weights)
        # if any node is nan, we can't rely on the result
valid = np.all(~np.isnan(act_vec), axis=-1)
act_vec[~valid, :] = np.nan
y_raw = act_vec[..., -self.n_out:]
return self.measurements_from_output(y_raw, y_true, measures)
def measurements_from_output(self, y_raw, y_true, measures):
return_values = dict()
if 'raw' in measures:
return_values['raw'] = y_raw
y_pred = np.argmax(y_raw, axis=-1)
y_prob = softmax(y_raw, axis=-1)
if 'probabilities' in measures:
return_values['probabilities'] = y_prob
if 'predictions' in measures:
return_values['predictions'] = y_pred
y_raw = y_raw.reshape(y_raw.shape[0], -1, self.n_out)
y_prob = y_prob.reshape(y_raw.shape[0], -1, self.n_out)
y_pred = y_pred.reshape(y_raw.shape[0], -1)
if y_true is not None:
y_true = y_true.reshape(-1)
if 'log_loss' in measures:
# nan is same as maximally falsely predicted
y_prob[~np.isfinite(y_prob)] = 0
return_values['log_loss'] = np.array([
sklearn.metrics.log_loss(y_true, prob, labels=np.arange(self.n_out))
for prob in y_prob
])
if 'mse_loss' in measures:
return_values['mse_loss'] = np.array([
sklearn.metrics.mean_squared_error(y_true, raw)
for raw in y_raw
])
if 'accuracy' in measures:
return_values['accuracy'] = np.array([
sklearn.metrics.accuracy_score(y_true, pred)
for pred in y_pred
])
if 'kappa' in measures:
return_values['kappa'] = np.array([
sklearn.metrics.cohen_kappa_score(y_true, pred)
for pred in y_pred
])
return return_values
def activation_functions(self, nodes, x=None):
funcs = self.nodes['func'][nodes - self.offset]
return apply_act_function(self.enabled_act_functions, funcs, x)
def calc_act(self, x, active_nodes, base_weights, add_to_sum=0):
"""Apply updates for active nodes (active nodes can't share edges).
"""
addend_nodes = active_nodes[0]
M = self.weight_matrix[:addend_nodes, active_nodes - self.offset]
# x3d: weights, samples, source nodes
# M3d: weights, source, target
# multiply relevant weight matrix with base weights
M3d = M[None, :, :] * base_weights[:, None, None]
x3d = x[..., :addend_nodes]
act_sums = np.matmul(x3d, M3d) + add_to_sum
# apply activation function for active nodes
return self.activation_functions(active_nodes, act_sums)
| 33.158228
| 86
| 0.601642
| 722
| 5,239
| 4.192521
| 0.261773
| 0.015857
| 0.020813
| 0.043608
| 0.087876
| 0.042286
| 0.03634
| 0.03634
| 0.03634
| 0.017839
| 0
| 0.010821
| 0.27677
| 5,239
| 157
| 87
| 33.369427
| 0.788071
| 0.175988
| 0
| 0.075269
| 0
| 0
| 0.046364
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 1
| 0.064516
| false
| 0
| 0.043011
| 0
| 0.215054
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a2acc58ab0f0250a6af12c5eb3f75f975289067
| 14,665
|
py
|
Python
|
common/tests/util.py
|
uktrade/tamato
|
4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca
|
[
"MIT"
] | 14
|
2020-03-25T11:11:29.000Z
|
2022-03-08T20:41:33.000Z
|
common/tests/util.py
|
uktrade/tamato
|
4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca
|
[
"MIT"
] | 352
|
2020-03-25T10:42:09.000Z
|
2022-03-30T15:32:26.000Z
|
common/tests/util.py
|
uktrade/tamato
|
4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca
|
[
"MIT"
] | 3
|
2020-08-06T12:22:41.000Z
|
2022-01-16T11:51:12.000Z
|
import contextlib
from datetime import date
from datetime import datetime
from datetime import timezone
from functools import wraps
from io import BytesIO
from itertools import count
from typing import Any
from typing import Dict
from typing import Sequence
import pytest
from dateutil.parser import parse as parse_date
from dateutil.relativedelta import relativedelta
from django import forms
from django.core.exceptions import ValidationError
from django.template.loader import render_to_string
from django.urls import reverse
from freezegun import freeze_time
from lxml import etree
from common.models.records import TrackedModel
from common.renderers import counter_generator
from common.serializers import validate_taric_xml_record_order
from common.util import TaricDateRange
from common.util import get_accessor
from common.util import get_field_tuple
INTERDEPENDENT_IMPORT_IMPLEMENTED = True
UPDATE_IMPORTER_IMPLEMENTED = True
EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED = False
COMMODITIES_IMPLEMENTED = True
MEURSING_TABLES_IMPLEMENTED = False
PARTIAL_TEMPORARY_STOP_IMPLEMENTED = False
UTC = timezone.utc
requires_commodities = pytest.mark.skipif(
not COMMODITIES_IMPLEMENTED,
reason="Commodities not implemented",
)
requires_export_refund_nomenclature = pytest.mark.skipif(
not EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED,
reason="Export refund nomenclature not implemented",
)
requires_meursing_tables = pytest.mark.skipif(
not MEURSING_TABLES_IMPLEMENTED,
reason="Meursing tables not implemented",
)
requires_partial_temporary_stop = pytest.mark.skipif(
not PARTIAL_TEMPORARY_STOP_IMPLEMENTED,
reason="Partial temporary stop not implemented",
)
requires_interdependent_import = pytest.mark.skipif(
not INTERDEPENDENT_IMPORT_IMPLEMENTED,
reason="Interdependent imports not implemented",
)
requires_update_importer = pytest.mark.skipif(
not UPDATE_IMPORTER_IMPLEMENTED,
reason="Requires Updating importers to be implemented",
)
@contextlib.contextmanager
def raises_if(exception, expected):
try:
yield
except exception:
if not expected:
raise
else:
if expected:
pytest.fail(f"Did not raise {exception}")
def check_validator(validate, value, expected_valid):
try:
validate(value)
except ValidationError:
if expected_valid:
pytest.fail(f'Unexpected validation error for value "{value}"')
except Exception:
raise
else:
if not expected_valid:
pytest.fail(f'Expected validation error for value "{value}"')
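# Hedged usage sketch for check_validator: `validate_positive` below is a
# made-up validator, not something defined elsewhere in this codebase.
def _check_validator_demo():
    def validate_positive(value):
        if value <= 0:
            raise ValidationError("must be positive")
    check_validator(validate_positive, 5, expected_valid=True)
    check_validator(validate_positive, -1, expected_valid=False)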
def make_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are duplicates of each
other and returns the record created last."""
existing = factory.create()
# allow overriding identifying_fields
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
return factory.create(
**dict(get_field_tuple(existing, field) for field in identifying_fields)
)
def make_non_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are not duplicates of
each other and returns the record created last."""
existing = factory.create()
not_duplicate = factory.create()
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
assert any(
get_field_tuple(existing, f) != get_field_tuple(not_duplicate, f)
for f in identifying_fields
)
return not_duplicate
def get_checkable_data(model: TrackedModel, ignore=frozenset()):
"""
Returns a dict representing the model's data ignoring any automatically set
fields and fields with names passed to `ignore`.
The returned data will contain the identifying fields for any linked
models rather than internal PKs.
For example:
get_checkable_data(FootnoteDescriptionFactory(), ignore={"sid"})
# {
# "description": "My sample footnote text",
# "described_footnote": {
# "footnote_type__footnote_type_id": "FN"
# "footnote_id": "123",
# },
# }
"""
checked_field_names = {f.name for f in model.copyable_fields} - ignore
data = {
name: getattr(model, get_accessor(model._meta.get_field(name)))
for name in checked_field_names
}
identifying_fields = {
name: data[name].get_identifying_fields()
for name in checked_field_names
if hasattr(data[name], "get_identifying_fields")
}
data.update(identifying_fields)
return data
def assert_records_match(
expected: TrackedModel,
imported: TrackedModel,
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported model is the same
as the data in the expected model.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = get_checkable_data(expected, ignore=ignore)
imported_data = get_checkable_data(imported, ignore=ignore)
assert expected_data == imported_data
def assert_many_records_match(
expected: Sequence[TrackedModel],
imported: Sequence[TrackedModel],
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported models is the same
as the data in the expected models, and that the count of both is equal.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = [get_checkable_data(e, ignore=ignore) for e in expected]
imported_data = [get_checkable_data(i, ignore=ignore) for i in imported]
assert expected_data == imported_data
_transaction_counter = count(start=1)
def generate_test_import_xml(obj: dict) -> BytesIO:
xml = render_to_string(
template_name="workbaskets/taric/transaction_detail.xml",
context={
"envelope_id": next(_transaction_counter),
"tracked_models": [obj],
"transaction_id": next(_transaction_counter),
"message_counter": counter_generator(),
"counter_generator": counter_generator,
},
)
return BytesIO(xml.encode())
def taric_xml_record_codes(xml):
"""Yields tuples of (record_code, subrecord_code)"""
records = xml.xpath(".//*[local-name() = 'record']")
codes = etree.XPath(
".//*[local-name()='record.code' or local-name()='subrecord.code']/text()",
)
return [tuple(codes(record)) for record in records]
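# Hedged illustration of taric_xml_record_codes on a tiny hand-written
# document (element names mimic the record/record.code/subrecord.code layout
# the XPath above expects; this is not real TARIC output).
def _taric_codes_demo():
    xml = etree.XML(
        b"<env><record><record.code>430</record.code>"
        b"<subrecord.code>00</subrecord.code></record></env>"
    )
    return taric_xml_record_codes(xml)  # -> [("430", "00")]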
def validate_taric_xml(
factory=None,
instance=None,
factory_kwargs=None,
check_order=True,
):
def decorator(func):
def wraps(
api_client,
taric_schema,
approved_transaction,
valid_user,
*args,
**kwargs,
):
if not factory and not instance:
raise AssertionError(
"Either a factory or an object instance need to be provided",
)
if factory and instance:
raise AssertionError(
"Either a factory or an object instance need to be provided - not both.",
)
current_instance = instance or factory.create(
transaction=approved_transaction, **factory_kwargs or {}
)
api_client.force_login(user=valid_user)
response = api_client.get(
reverse(
"workbaskets:workbasket-detail",
kwargs={"pk": approved_transaction.workbasket.pk},
),
{"format": "xml"},
)
assert response.status_code == 200
content = response.content
xml = etree.XML(content)
taric_schema.validate(xml)
assert not taric_schema.error_log, f"XML errors: {taric_schema.error_log}"
if check_order:
validate_taric_xml_record_order(xml)
kwargs = {"xml": xml, **kwargs}
func(
*args,
**kwargs,
)
return wraps
return decorator
class Dates:
deltas = {
"normal": (relativedelta(), relativedelta(months=+1)),
"earlier": (relativedelta(years=-1), relativedelta(years=-1, months=+1)),
"later": (
relativedelta(years=+1, months=+1, days=+1),
relativedelta(years=+1, months=+2),
),
"big": (relativedelta(years=-2), relativedelta(years=+2, days=+1)),
"adjacent": (relativedelta(days=+1), relativedelta(months=+1)),
"adjacent_earlier": (relativedelta(months=-1), relativedelta(days=-1)),
"adjacent_later": (relativedelta(months=+1, days=+1), relativedelta(months=+2)),
"adjacent_no_end": (relativedelta(months=+1, days=+1), None),
"adjacent_even_later": (
relativedelta(months=+2, days=+1),
relativedelta(months=+3),
),
"adjacent_earlier_big": (
relativedelta(years=-2, months=-2),
relativedelta(years=-2),
),
"adjacent_later_big": (
relativedelta(months=+1, days=+1),
relativedelta(years=+2, months=+2),
),
"overlap_normal": (
relativedelta(days=+15),
relativedelta(days=+14, months=+1, years=+1),
),
"overlap_normal_earlier": (
relativedelta(months=-1, days=+14),
relativedelta(days=+14),
),
"overlap_normal_same_year": (
relativedelta(days=+15),
relativedelta(days=+14, months=+1),
),
"overlap_big": (relativedelta(years=+1), relativedelta(years=+3, days=+2)),
"after_big": (
relativedelta(years=+3, months=+1),
relativedelta(years=+3, months=+2),
),
"backwards": (relativedelta(months=+1), relativedelta(days=+1)),
"starts_with_normal": (relativedelta(), relativedelta(days=+14)),
"ends_with_normal": (relativedelta(days=+14), relativedelta(months=+1)),
"current": (relativedelta(weeks=-4), relativedelta(weeks=+4)),
"future": (relativedelta(weeks=+10), relativedelta(weeks=+20)),
"no_end": (relativedelta(), None),
"normal_first_half": (relativedelta(), relativedelta(days=+14)),
}
@property
def now(self):
return self.datetime_now.date()
@property
def datetime_now(self):
return datetime.now(tz=UTC).replace(hour=0, minute=0, second=0, microsecond=0)
def __getattr__(self, name):
if name in self.deltas:
start, end = self.deltas[name]
start = self.now + start
if end is not None:
end = self.now + end
return TaricDateRange(start, end)
raise AttributeError(name)
@classmethod
def short_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(days=-14),
)
@classmethod
def medium_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(days=-1),
)
@classmethod
def short_after(cls, dt):
return TaricDateRange(
dt + relativedelta(days=+14),
dt + relativedelta(months=+1),
)
@classmethod
def short_overlap(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
dt + relativedelta(months=+1),
)
@classmethod
def no_end_before(cls, dt):
return TaricDateRange(
dt + relativedelta(months=-1),
None,
)
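# Hedged usage sketch for Dates: attribute access looks the name up in
# `deltas` and resolves it into a TaricDateRange anchored on today's date.
def _dates_demo():
    dates = Dates()
    this_month = dates.normal            # today .. today + 1 month
    last_month = dates.adjacent_earlier  # one month ago .. yesterday
    return this_month, last_month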
def only_applicable_after(cutoff):
"""
Decorator which asserts that a test fails after a specified cutoff date.
:param cutoff: A date string, or datetime object before which the test should fail.
"""
cutoff = parse_date(cutoff)
def decorator(fn):
@wraps(fn)
def do_test(*args, **kwargs):
# test should pass normally
fn(*args, **kwargs)
# test should fail before cutoff
with freeze_time(cutoff + relativedelta(days=-1)):
try:
fn(*args, **kwargs)
except pytest.fail.Exception:
pass
except Exception:
raise
else:
pytest.fail(f"Rule applied before {cutoff:%Y-%m-%d}")
return True
return do_test
return decorator
def validity_period_post_data(start: date, end: date) -> Dict[str, int]:
"""
Construct a POST data fragment for the validity period start and end dates
of a ValidityPeriodForm from the given date objects, eg:
>>> validity_period_post_data(
>>> datetime.date(2021, 1, 2),
>>> datetime.date(2022, 3, 4),
>>> )
{
"start_date_0": 1,
"start_date_1": 2,
"start_date_2": 2021,
"end_date_0": 4,
"end_date_1": 3,
"end_date_2": 2022,
}
"""
return {
f"{name}_{i}": part
for name, date in (("start_date", start), ("end_date", end))
for i, part in enumerate([date.day, date.month, date.year])
}
def get_form_data(form: forms.ModelForm) -> Dict[str, Any]:
"""Returns a dictionary of the fields that the form will put onto a page and
their current values, taking account of any fields that have sub-fields and
hence result in multiple HTML <input> objects."""
data = {**form.initial}
for field in form.rendered_fields:
value = data[field] if field in data else form.fields[field].initial
if hasattr(form.fields[field].widget, "decompress"):
# If the widget can be decompressed, then it is not just a simple
# value and has some internal structure. So we need to generate one
# form item per decompressed value and append the name with _0, _1,
# etc. This mirrors the MultiValueWidget in django/forms/widgets.py.
if field in data:
del data[field]
value = form.fields[field].widget.decompress(value)
data.update(
**{f"{field}_{i}": v for i, v in enumerate(value) if v is not None}
)
elif value is not None:
data.setdefault(field, value)
return data
| 31.268657
| 93
| 0.633277
| 1,685
| 14,665
| 5.361424
| 0.217211
| 0.015497
| 0.033208
| 0.012619
| 0.264113
| 0.197144
| 0.159951
| 0.159951
| 0.149989
| 0.137259
| 0
| 0.011576
| 0.269553
| 14,665
| 468
| 94
| 31.33547
| 0.831777
| 0.16345
| 0
| 0.21118
| 0
| 0
| 0.096074
| 0.019048
| 0
| 0
| 0
| 0
| 0.02795
| 1
| 0.07764
| false
| 0.003106
| 0.124224
| 0.021739
| 0.270186
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a2b4094e1ca26bb245cb9af7bc67b4f16fdf9b2
| 2,224
|
py
|
Python
|
studies/mixture_feasibility/parsley_benchmark/alcohol_ester/run.py
|
openforcefield/nistdataselection
|
d797d597f4ff528a7219d58daa8ef6508d438b24
|
[
"MIT"
] | 3
|
2020-03-25T02:42:04.000Z
|
2020-07-20T10:39:35.000Z
|
studies/mixture_feasibility/parsley_benchmark/alcohol_ester/run.py
|
openforcefield/nistdataselection
|
d797d597f4ff528a7219d58daa8ef6508d438b24
|
[
"MIT"
] | 13
|
2019-09-05T00:20:03.000Z
|
2020-03-05T23:58:04.000Z
|
studies/mixture_feasibility/parsley_benchmark/alcohol_ester/run.py
|
openforcefield/nistdataselection
|
d797d597f4ff528a7219d58daa8ef6508d438b24
|
[
"MIT"
] | null | null | null |
from evaluator import unit
from evaluator.backends import QueueWorkerResources
from evaluator.backends.dask import DaskLSFBackend
from evaluator.client import ConnectionOptions, EvaluatorClient
from evaluator.datasets import PhysicalPropertyDataSet
from evaluator.forcefield import SmirnoffForceFieldSource
from evaluator.server import EvaluatorServer
from evaluator.utils import setup_timestamp_logging
def main():
setup_timestamp_logging()
# Load in the force field
force_field_path = "openff-1.0.0.offxml"
force_field_source = SmirnoffForceFieldSource.from_path(force_field_path)
# Load in the test set.
data_set = PhysicalPropertyDataSet.from_json("full_set.json")
# Set up a server object to run the calculations using.
working_directory = "working_directory"
    # Set up a backend to run the calculations on. This assumes running
    # on an HPC resource with the LSF queue system installed.
queue_resources = QueueWorkerResources(
number_of_threads=1,
number_of_gpus=1,
preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
per_thread_memory_limit=5 * unit.gigabyte,
wallclock_time_limit="05:59",
)
worker_script_commands = ["conda activate forcebalance", "module load cuda/10.1"]
calculation_backend = DaskLSFBackend(
minimum_number_of_workers=1,
maximum_number_of_workers=50,
resources_per_worker=queue_resources,
queue_name="gpuqueue",
setup_script_commands=worker_script_commands,
adaptive_interval="1000ms",
)
with calculation_backend:
server = EvaluatorServer(
calculation_backend=calculation_backend,
working_directory=working_directory,
port=8004,
)
with server:
# Request the estimates.
client = EvaluatorClient(ConnectionOptions(server_port=8004))
request, _ = client.request_estimate(
property_set=data_set, force_field_source=force_field_source,
)
# Wait for the results.
results, _ = request.results(True, 5)
results.json(f"results.json")
if __name__ == "__main__":
main()
| 31.771429
| 85
| 0.71223
| 247
| 2,224
| 6.133603
| 0.441296
| 0.068647
| 0.031683
| 0.026403
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016802
| 0.223921
| 2,224
| 69
| 86
| 32.231884
| 0.86095
| 0.119155
| 0
| 0
| 0
| 0
| 0.069708
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.177778
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a2b482bae656ac79eb981d550db6a1224027b57
| 2,268
|
py
|
Python
|
nuplan/planning/simulation/observation/idm/test/test_profile_idm_observation.py
|
motional/nuplan-devkit
|
e39029e788b17f47f2fcadb774098ef8fbdd0d67
|
[
"Apache-2.0"
] | 128
|
2021-12-06T15:41:14.000Z
|
2022-03-29T13:16:32.000Z
|
nuplan/planning/simulation/observation/idm/test/test_profile_idm_observation.py
|
motional/nuplan-devkit
|
e39029e788b17f47f2fcadb774098ef8fbdd0d67
|
[
"Apache-2.0"
] | 28
|
2021-12-11T08:11:31.000Z
|
2022-03-25T02:35:43.000Z
|
nuplan/planning/simulation/observation/idm/test/test_profile_idm_observation.py
|
motional/nuplan-devkit
|
e39029e788b17f47f2fcadb774098ef8fbdd0d67
|
[
"Apache-2.0"
] | 14
|
2021-12-11T04:12:26.000Z
|
2022-03-24T06:38:30.000Z
|
import logging
import unittest
from pyinstrument import Profiler
from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario
from nuplan.planning.simulation.history.simulation_history_buffer import SimulationHistoryBuffer
from nuplan.planning.simulation.observation.idm_agents import IDMAgents
from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class TestProfileIDM(unittest.TestCase):
"""
Profiling test for IDM agents.
"""
def setUp(self) -> None:
"""
Inherited, see super class.
"""
self.n_repeat_trials = 1
self.display_results = True
self.scenario = get_test_nuplan_scenario()
def test_profile_idm_agent_observation(self) -> None:
"""Profile IDMAgents."""
profiler = Profiler(interval=0.0001)
profiler.start()
# How many times to repeat runtime test
for _ in range(self.n_repeat_trials):
observation = IDMAgents(
target_velocity=10,
min_gap_to_lead_agent=0.5,
headway_time=1.5,
accel_max=1.0,
decel_max=2.0,
scenario=self.scenario,
)
for step in range(self.scenario.get_number_of_iterations() - 1):
iteration = SimulationIteration(time_point=self.scenario.get_time_point(step), index=step)
next_iteration = SimulationIteration(time_point=self.scenario.get_time_point(step + 1), index=step + 1)
buffer = SimulationHistoryBuffer.initialize_from_list(
1,
[self.scenario.get_ego_state_at_iteration(step)],
[self.scenario.get_tracked_objects_at_iteration(step)],
next_iteration.time_point.time_s - iteration.time_point.time_s,
)
observation.update_observation(iteration, next_iteration, buffer)
profiler.stop()
if self.display_results:
logger.info(profiler.output_text(unicode=True, color=True))
if __name__ == "__main__":
unittest.main()
| 36.580645
| 119
| 0.665785
| 252
| 2,268
| 5.690476
| 0.392857
| 0.058577
| 0.062762
| 0.058577
| 0.122734
| 0.090656
| 0.090656
| 0.090656
| 0.090656
| 0.090656
| 0
| 0.011799
| 0.252646
| 2,268
| 61
| 120
| 37.180328
| 0.834218
| 0.051146
| 0
| 0
| 0
| 0
| 0.003795
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.170732
| 0
| 0.243902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a2c405aef1ab33457cf8c88423bb2ac392300fb
| 11,867
|
py
|
Python
|
baselines/ddpg/ddpg.py
|
RDaneelOlivav/baselines
|
fea6ba932055bb76d68b4b22e812bab738fc18f8
|
[
"MIT"
] | 11
|
2021-02-23T17:15:21.000Z
|
2021-09-08T21:31:57.000Z
|
baselines/ddpg/ddpg.py
|
RDaneelOlivav/baselines
|
fea6ba932055bb76d68b4b22e812bab738fc18f8
|
[
"MIT"
] | 1
|
2021-03-04T05:49:46.000Z
|
2021-03-04T10:50:59.000Z
|
baselines/ddpg/ddpg.py
|
RDaneelOlivav/baselines
|
fea6ba932055bb76d68b4b22e812bab738fc18f8
|
[
"MIT"
] | 2
|
2021-01-29T10:40:35.000Z
|
2021-03-03T08:03:59.000Z
|
import os
import os.path as osp
import time
from collections import deque
import pickle
from baselines.ddpg.ddpg_learner import DDPG
from baselines.ddpg.models import Actor, Critic
from baselines.ddpg.memory import Memory
from baselines.ddpg.noise import AdaptiveParamNoiseSpec, NormalActionNoise, OrnsteinUhlenbeckActionNoise
from baselines.common import set_global_seeds
from baselines import logger
import tensorflow as tf
import numpy as np
try:
from mpi4py import MPI
except ImportError:
MPI = None
def learn(network, env,
seed=None,
total_timesteps=None,
nb_epochs=None, # with default settings, perform 1M steps total
nb_epoch_cycles=20,
nb_rollout_steps=100,
reward_scale=1.0,
render=False,
render_eval=False,
noise_type='adaptive-param_0.2',
normalize_returns=False,
normalize_observations=True,
critic_l2_reg=1e-2,
actor_lr=1e-4,
critic_lr=1e-3,
popart=False,
gamma=0.99,
clip_norm=None,
nb_train_steps=50, # per epoch cycle and MPI worker,
nb_eval_steps=100,
batch_size=64, # per MPI worker
tau=0.01,
eval_env=None,
param_noise_adaption_interval=50,
load_path=None,
**network_kwargs):
set_global_seeds(seed)
if total_timesteps is not None:
assert nb_epochs is None
nb_epochs = int(total_timesteps) // (nb_epoch_cycles * nb_rollout_steps)
else:
nb_epochs = 500
if MPI is not None:
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0
nb_actions = env.action_space.shape[-1]
assert (np.abs(env.action_space.low) == env.action_space.high).all() # we assume symmetric actions.
memory = Memory(limit=int(1e6), action_shape=env.action_space.shape, observation_shape=env.observation_space.shape)
critic = Critic(nb_actions, ob_shape=env.observation_space.shape, network=network, **network_kwargs)
actor = Actor(nb_actions, ob_shape=env.observation_space.shape, network=network, **network_kwargs)
action_noise = None
param_noise = None
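# noise_type is a comma-separated list of specs such as 'adaptive-param_0.2',
# 'normal_0.1', 'ou_0.2' or 'none'; the suffix after the underscore is the
# standard deviation used to build the corresponding noise object below.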
if noise_type is not None:
for current_noise_type in noise_type.split(','):
current_noise_type = current_noise_type.strip()
if current_noise_type == 'none':
pass
elif 'adaptive-param' in current_noise_type:
_, stddev = current_noise_type.split('_')
param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev), desired_action_stddev=float(stddev))
elif 'normal' in current_noise_type:
_, stddev = current_noise_type.split('_')
action_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
elif 'ou' in current_noise_type:
_, stddev = current_noise_type.split('_')
action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
else:
raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))
max_action = env.action_space.high
logger.info('scaling actions by {} before executing in env'.format(max_action))
agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations,
batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg,
actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm,
reward_scale=reward_scale)
logger.info('Using agent with the following configuration:')
logger.info(str(agent.__dict__.items()))
if load_path is not None:
load_path = osp.expanduser(load_path)
ckpt = tf.train.Checkpoint(model=agent)
manager = tf.train.CheckpointManager(ckpt, load_path, max_to_keep=None)
ckpt.restore(manager.latest_checkpoint)
print("Restoring from {}".format(manager.latest_checkpoint))
eval_episode_rewards_history = deque(maxlen=100)
episode_rewards_history = deque(maxlen=100)
# Prepare everything.
agent.initialize()
agent.reset()
obs = env.reset()
if eval_env is not None:
eval_obs = eval_env.reset()
nenvs = obs.shape[0]
episode_reward = np.zeros(nenvs, dtype = np.float32) #vector
episode_step = np.zeros(nenvs, dtype = int) # vector
episodes = 0 #scalar
t = 0 # scalar
epoch = 0
start_time = time.time()
epoch_episode_rewards = []
epoch_episode_steps = []
epoch_actions = []
epoch_qs = []
epoch_episodes = 0
for epoch in range(nb_epochs):
for cycle in range(nb_epoch_cycles):
# Perform rollouts.
if nenvs > 1:
# when simulating multiple envs in parallel, it is impossible to reset the agent at the
# end of the episode in each environment, so reset the agent here instead
agent.reset()
for t_rollout in range(nb_rollout_steps):
# Predict next action.
action, q, _, _ = agent.step(tf.constant(obs), apply_noise=True, compute_Q=True)
action, q = action.numpy(), q.numpy()
# Execute next action.
if rank == 0 and render:
env.render()
# max_action is of dimension A, whereas action is dimension (nenvs, A) - the multiplication gets broadcasted to the batch
new_obs, r, done, info = env.step(max_action * action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
# note these outputs are batched from vecenv
t += 1
if rank == 0 and render:
env.render()
episode_reward += r
episode_step += 1
# Book-keeping.
epoch_actions.append(action)
epoch_qs.append(q)
agent.store_transition(obs, action, r, new_obs, done) #the batched data will be unrolled in memory.py's append.
obs = new_obs
for d in range(len(done)):
if done[d]:
# Episode done.
epoch_episode_rewards.append(episode_reward[d])
episode_rewards_history.append(episode_reward[d])
epoch_episode_steps.append(episode_step[d])
episode_reward[d] = 0.
episode_step[d] = 0
epoch_episodes += 1
episodes += 1
if nenvs == 1:
agent.reset()
# Train.
epoch_actor_losses = []
epoch_critic_losses = []
epoch_adaptive_distances = []
for t_train in range(nb_train_steps):
# Adapt param noise, if necessary.
if memory.nb_entries >= batch_size and t_train % param_noise_adaption_interval == 0:
batch = agent.memory.sample(batch_size=batch_size)
obs0 = tf.constant(batch['obs0'])
distance = agent.adapt_param_noise(obs0)
epoch_adaptive_distances.append(distance)
cl, al = agent.train()
epoch_critic_losses.append(cl)
epoch_actor_losses.append(al)
agent.update_target_net()
# Evaluate.
eval_episode_rewards = []
eval_qs = []
if eval_env is not None:
nenvs_eval = eval_obs.shape[0]
eval_episode_reward = np.zeros(nenvs_eval, dtype = np.float32)
for t_rollout in range(nb_eval_steps):
eval_action, eval_q, _, _ = agent.step(eval_obs, apply_noise=False, compute_Q=True)
eval_obs, eval_r, eval_done, eval_info = eval_env.step(max_action * eval_action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
if render_eval:
eval_env.render()
eval_episode_reward += eval_r
eval_qs.append(eval_q)
for d in range(len(eval_done)):
if eval_done[d]:
eval_episode_rewards.append(eval_episode_reward[d])
eval_episode_rewards_history.append(eval_episode_reward[d])
eval_episode_reward[d] = 0.0
if MPI is not None:
mpi_size = MPI.COMM_WORLD.Get_size()
else:
mpi_size = 1
# Log stats.
# XXX shouldn't call np.mean on variable length lists
duration = time.time() - start_time
stats = agent.get_stats()
combined_stats = stats.copy()
combined_stats['rollout/return'] = np.mean(epoch_episode_rewards)
combined_stats['rollout/return_std'] = np.std(epoch_episode_rewards)
combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)
combined_stats['rollout/return_history_std'] = np.std(episode_rewards_history)
combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)
combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)
combined_stats['rollout/Q_mean'] = np.mean(epoch_qs)
combined_stats['train/loss_actor'] = np.mean(epoch_actor_losses)
combined_stats['train/loss_critic'] = np.mean(epoch_critic_losses)
combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)
combined_stats['total/duration'] = duration
combined_stats['total/steps_per_second'] = float(t) / float(duration)
combined_stats['total/episodes'] = episodes
combined_stats['rollout/episodes'] = epoch_episodes
combined_stats['rollout/actions_std'] = np.std(epoch_actions)
# Evaluation statistics.
if eval_env is not None:
combined_stats['eval/return'] = eval_episode_rewards
combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)
combined_stats['eval/Q'] = eval_qs
combined_stats['eval/episodes'] = len(eval_episode_rewards)
def as_scalar(x):
if isinstance(x, np.ndarray):
assert x.size == 1
return x[0]
elif np.isscalar(x):
return x
else:
raise ValueError('expected scalar, got %s'%x)
combined_stats_sums = np.array([ np.array(x).flatten()[0] for x in combined_stats.values()])
if MPI is not None:
combined_stats_sums = MPI.COMM_WORLD.allreduce(combined_stats_sums)
combined_stats = {k : v / mpi_size for (k,v) in zip(combined_stats.keys(), combined_stats_sums)}
# Total statistics.
combined_stats['total/epochs'] = epoch + 1
combined_stats['total/steps'] = t
for key in sorted(combined_stats.keys()):
logger.record_tabular(key, combined_stats[key])
if rank == 0:
logger.dump_tabular()
logger.info('')
logdir = logger.get_dir()
if rank == 0 and logdir:
if hasattr(env, 'get_state'):
with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f:
pickle.dump(env.get_state(), f)
if eval_env and hasattr(eval_env, 'get_state'):
with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f:
pickle.dump(eval_env.get_state(), f)
return agent
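if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module. The environment choice
    # below is an assumption: any gym continuous-control task with a symmetric action
    # space wrapped in a vectorized env (here DummyVecEnv) should work; the
    # hyperparameter values are illustrative only.
    import gym
    from baselines.common.vec_env.dummy_vec_env import DummyVecEnv

    env = DummyVecEnv([lambda: gym.make('Pendulum-v0')])
    trained_agent = learn(
        network='mlp',
        env=env,
        total_timesteps=10000,
        # comma-separated noise specs, parsed as described above
        noise_type='adaptive-param_0.2,ou_0.1',
    )
    env.close()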
| 41.493007
| 188
| 0.610854
| 1,459
| 11,867
| 4.717615
| 0.211104
| 0.05855
| 0.02557
| 0.013947
| 0.200203
| 0.159959
| 0.124219
| 0.093709
| 0.086154
| 0.069446
| 0
| 0.010216
| 0.298896
| 11,867
| 285
| 189
| 41.638596
| 0.817067
| 0.079717
| 0
| 0.093333
| 0
| 0
| 0.055913
| 0.010742
| 0
| 0
| 0
| 0
| 0.013333
| 1
| 0.008889
| false
| 0.004444
| 0.066667
| 0
| 0.088889
| 0.004444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a2e0012f198d1fec400f883216fa2149bcfd26b
| 1,889
|
py
|
Python
|
footprints/transaction_details.py
|
enwawerueli/footprints
|
d9b2a0064b21495edfd0563cb521b0675ee4363d
|
[
"MIT"
] | 1
|
2018-10-11T19:23:08.000Z
|
2018-10-11T19:23:08.000Z
|
footprints/transaction_details.py
|
enwawerueli/footprints
|
d9b2a0064b21495edfd0563cb521b0675ee4363d
|
[
"MIT"
] | null | null | null |
footprints/transaction_details.py
|
enwawerueli/footprints
|
d9b2a0064b21495edfd0563cb521b0675ee4363d
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from PySide2.QtPrintSupport import QPrinter, QPrintDialog
from jinja2 import TemplateNotFound
from .ui.ui_transaction_details import Ui_TransactionDetails
from .ui import images_rc
from . import jinja_env
from .exceptions import PrinterError
class TransactionDetails(QDialog, Ui_TransactionDetails):
def __init__(self, transaction, parent=None, *args, **kwargs):
QDialog.__init__(self, parent, *args, **kwargs)
self._transaction = transaction
self.setupUi(self)
self.setWindowTitle(QApplication.applicationName())
self.print_pb.setIcon(QIcon.fromTheme('document-print-symbolic', QIcon(':/icons/print')))
try:
trans = jinja_env.get_template('trans.jinja2.html')
except TemplateNotFound:
pass
else:
html = trans.render(transaction=self._transaction, standalone=True)
self.statement_tb.setHtml(html)
self.print_pb.clicked.connect(self.print_statement)
def print_statement(self):
printer = QPrinter()
printer.setOutputFileName(os.path.join(
os.environ.get('HOME'), '%s_%s.pdf' %
(self._transaction.created_at.strftime('%Y%m%d'), self._transaction.transaction_code)))
if QPrintDialog(printer, self.parentWidget()).exec_() != QDialog.Accepted:
return None
try:
trans = jinja_env.get_template('trans.jinja2.html')
except TemplateNotFound as e:
raise PrinterError('Printer data source unavailable') from e
html = trans.render(transaction=self._transaction, printed_at=datetime.now().strftime('%d/%m/%Y, %I:%M:%S %p'))
doc = QTextDocument(self)
doc.setHtml(html)
doc.print_(printer)
return None
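if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module. The transaction stub
    # below is hypothetical: the real project passes its own transaction model, and
    # the jinja template may expect additional attributes beyond the two shown here.
    import sys
    from types import SimpleNamespace

    app = QApplication(sys.argv)
    transaction = SimpleNamespace(created_at=datetime.now(), transaction_code='TX0001')
    dialog = TransactionDetails(transaction)
    dialog.show()
    sys.exit(app.exec_())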
| 38.55102
| 119
| 0.68343
| 214
| 1,889
| 5.873832
| 0.429907
| 0.071599
| 0.040573
| 0.025457
| 0.167064
| 0.167064
| 0.10183
| 0.10183
| 0.10183
| 0.10183
| 0
| 0.004692
| 0.210164
| 1,889
| 48
| 120
| 39.354167
| 0.837802
| 0
| 0
| 0.142857
| 0
| 0
| 0.074643
| 0.012176
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0.02381
| 0.261905
| 0
| 0.380952
| 0.190476
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a2e68851d4d316362a1de570d5c1e2e08a4775e
| 64,070
|
py
|
Python
|
yt/units/yt_array.py
|
FeiLi5/git-github.com-yt-project-yt
|
0c6cf75351b91e4da80f6a0207ebbcb73dd72a59
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/units/yt_array.py
|
FeiLi5/git-github.com-yt-project-yt
|
0c6cf75351b91e4da80f6a0207ebbcb73dd72a59
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/units/yt_array.py
|
FeiLi5/git-github.com-yt-project-yt
|
0c6cf75351b91e4da80f6a0207ebbcb73dd72a59
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
"""
YTArray class.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import copy
import numpy as np
from distutils.version import LooseVersion
from functools import wraps
from numpy import \
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \
floor_divide, negative, power, remainder, mod, absolute, rint, \
sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \
reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \
hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \
bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \
greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \
logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \
isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \
modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing
try:
# numpy 1.13 or newer
from numpy import positive, divmod as divmod_, isnat, heaviside
except ImportError:
positive, divmod_, isnat, heaviside = (None,)*4
from yt.units.unit_object import Unit, UnitParseError
from yt.units.unit_registry import UnitRegistry
from yt.units.dimensions import \
angle, \
current_mks, \
dimensionless, \
em_dimensions
from yt.utilities.exceptions import \
YTUnitOperationError, YTUnitConversionError, \
YTUfuncUnitError, YTIterableUnitCoercionError, \
YTInvalidUnitEquivalence, YTEquivalentDimsError
from yt.utilities.lru_cache import lru_cache
from numbers import Number as numeric_type
from yt.utilities.on_demand_imports import _astropy
from sympy import Rational
from yt.units.unit_lookup_table import \
default_unit_symbol_lut
from yt.units.equivalencies import equivalence_registry
from yt.utilities.logger import ytLogger as mylog
from .pint_conversions import convert_pint_units
NULL_UNIT = Unit()
POWER_SIGN_MAPPING = {multiply: 1, divide: -1}
# redefine this here to avoid a circular import from yt.funcs
def iterable(obj):
try: len(obj)
except: return False
return True
def return_arr(func):
@wraps(func)
def wrapped(*args, **kwargs):
ret, units = func(*args, **kwargs)
if ret.shape == ():
return YTQuantity(ret, units)
else:
# This could be a subclass, so don't call YTArray directly.
return type(args[0])(ret, units)
return wrapped
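# The helpers below compute the result units for individual numpy ufuncs
# (e.g. sqrt halves the unit exponent, multiply combines units, comparisons
# drop them). They are wired up to the corresponding ufuncs in
# YTArray._ufunc_registry further down in this module.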
@lru_cache(maxsize=128, typed=False)
def sqrt_unit(unit):
return unit**0.5
@lru_cache(maxsize=128, typed=False)
def multiply_units(unit1, unit2):
return unit1 * unit2
def preserve_units(unit1, unit2=None):
return unit1
@lru_cache(maxsize=128, typed=False)
def power_unit(unit, power):
return unit**power
@lru_cache(maxsize=128, typed=False)
def square_unit(unit):
return unit*unit
@lru_cache(maxsize=128, typed=False)
def divide_units(unit1, unit2):
return unit1/unit2
@lru_cache(maxsize=128, typed=False)
def reciprocal_unit(unit):
return unit**-1
def passthrough_unit(unit, unit2=None):
return unit
def return_without_unit(unit, unit2=None):
return None
def arctan2_unit(unit1, unit2):
return NULL_UNIT
def comparison_unit(unit1, unit2=None):
return None
def invert_units(unit):
raise TypeError(
"Bit-twiddling operators are not defined for YTArray instances")
def bitop_units(unit1, unit2):
raise TypeError(
"Bit-twiddling operators are not defined for YTArray instances")
def get_inp_u_unary(ufunc, inputs, out_arr=None):
inp = inputs[0]
u = getattr(inp, 'units', None)
if u is None:
u = NULL_UNIT
if u.dimensions is angle and ufunc in trigonometric_operators:
inp = inp.in_units('radian').v
if out_arr is not None:
out_arr = ufunc(inp).view(np.ndarray)
return out_arr, inp, u
def get_inp_u_binary(ufunc, inputs):
inp1 = coerce_iterable_units(inputs[0])
inp2 = coerce_iterable_units(inputs[1])
unit1 = getattr(inp1, 'units', None)
unit2 = getattr(inp2, 'units', None)
ret_class = get_binary_op_return_class(type(inp1), type(inp2))
if unit1 is None:
unit1 = Unit(registry=getattr(unit2, 'registry', None))
if unit2 is None and ufunc is not power:
unit2 = Unit(registry=getattr(unit1, 'registry', None))
elif ufunc is power:
unit2 = inp2
if isinstance(unit2, np.ndarray):
if isinstance(unit2, YTArray):
if unit2.units.is_dimensionless:
pass
else:
raise YTUnitOperationError(ufunc, unit1, unit2)
unit2 = 1.0
return (inp1, inp2), (unit1, unit2), ret_class
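# The two handlers below reconcile mismatched units for ufuncs that require
# matching units (add, subtract, comparisons, ...): an operand that is all
# zeros is treated as unit-agnostic and adopts the other operand's units;
# otherwise the second input is converted into the units of the first, or an
# error is raised if the dimensions are incompatible.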
def handle_preserve_units(inps, units, ufunc, ret_class):
if units[0] != units[1]:
any_nonzero = [np.any(inps[0]), np.any(inps[1])]
if any_nonzero[0] == np.bool_(False):
units = (units[1], units[1])
elif any_nonzero[1] == np.bool_(False):
units = (units[0], units[0])
else:
if not units[0].same_dimensions_as(units[1]):
raise YTUnitOperationError(ufunc, *units)
inps = (inps[0], ret_class(inps[1]).to(
ret_class(inps[0]).units))
return inps, units
def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False):
if units[0] != units[1]:
u1d = units[0].is_dimensionless
u2d = units[1].is_dimensionless
any_nonzero = [np.any(inps[0]), np.any(inps[1])]
if any_nonzero[0] == np.bool_(False):
units = (units[1], units[1])
elif any_nonzero[1] == np.bool_(False):
units = (units[0], units[0])
elif not any([u1d, u2d]):
if not units[0].same_dimensions_as(units[1]):
raise YTUnitOperationError(ufunc, *units)
else:
if raise_error:
raise YTUfuncUnitError(ufunc, *units)
inps = (inps[0], ret_class(inps[1]).to(
ret_class(inps[0]).units))
return inps, units
def handle_multiply_divide_units(unit, units, out, out_arr):
if unit.is_dimensionless and unit.base_value != 1.0:
if not units[0].is_dimensionless:
if units[0].dimensions == units[1].dimensions:
out_arr = np.multiply(out_arr.view(np.ndarray),
unit.base_value, out=out)
unit = Unit(registry=unit.registry)
return out, out_arr, unit
def coerce_iterable_units(input_object):
if isinstance(input_object, np.ndarray):
return input_object
if iterable(input_object):
if any([isinstance(o, YTArray) for o in input_object]):
ff = getattr(input_object[0], 'units', NULL_UNIT)
if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]):
raise YTIterableUnitCoercionError(input_object)
# This will create a copy of the data in the iterable.
return YTArray(input_object)
return input_object
else:
return input_object
def sanitize_units_mul(this_object, other_object):
inp = coerce_iterable_units(this_object)
ret = coerce_iterable_units(other_object)
# If the other object is a YTArray and has the same dimensions as the object
# under consideration, convert so we don't mix units with the same
# dimensions.
if isinstance(ret, YTArray):
if inp.units.same_dimensions_as(ret.units):
ret.in_units(inp.units)
return ret
def sanitize_units_add(this_object, other_object, op_string):
inp = coerce_iterable_units(this_object)
ret = coerce_iterable_units(other_object)
# Make sure the other object is a YTArray before we use the `units`
# attribute.
if isinstance(ret, YTArray):
if not inp.units.same_dimensions_as(ret.units):
# handle special case of adding or subtracting with zero or
# array filled with zero
if not np.any(other_object):
return ret.view(np.ndarray)
elif not np.any(this_object):
return ret
raise YTUnitOperationError(op_string, inp.units, ret.units)
ret = ret.in_units(inp.units)
else:
# If the other object is not a YTArray, then one of the arrays must be
# dimensionless or filled with zeros
if not inp.units.is_dimensionless and np.any(ret):
raise YTUnitOperationError(op_string, inp.units, dimensionless)
return ret
def validate_comparison_units(this, other, op_string):
# Check that other is a YTArray.
if hasattr(other, 'units'):
if this.units.expr is other.units.expr:
if this.units.base_value == other.units.base_value:
return other
if not this.units.same_dimensions_as(other.units):
raise YTUnitOperationError(op_string, this.units, other.units)
return other.in_units(this.units)
return other
@lru_cache(maxsize=128, typed=False)
def _unit_repr_check_same(my_units, other_units):
"""
Takes a Unit object, or string of known unit symbol, and checks that it
is compatible with this quantity. Returns Unit object.
"""
# let Unit() handle units arg if it's not already a Unit obj.
if not isinstance(other_units, Unit):
other_units = Unit(other_units, registry=my_units.registry)
equiv_dims = em_dimensions.get(my_units.dimensions, None)
if equiv_dims == other_units.dimensions:
if current_mks in equiv_dims.free_symbols:
base = "SI"
else:
base = "CGS"
raise YTEquivalentDimsError(my_units, other_units, base)
if not my_units.same_dimensions_as(other_units):
raise YTUnitConversionError(
my_units, my_units.dimensions, other_units, other_units.dimensions)
return other_units
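# The tuples below classify the numpy ufuncs handled by this module: the
# pre-numpy-1.13 __array_wrap__ hook uses unary_operators/binary_operators to
# pick the right unit handling, and get_inp_u_unary converts angle-dimension
# inputs to radians for the trigonometric ufuncs.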
unary_operators = (
negative, absolute, rint, sign, conj, exp, exp2, log, log2,
log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin,
arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad,
rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan,
signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat,
)
binary_operators = (
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power,
remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor,
left_shift, right_shift, greater, greater_equal, less, less_equal,
not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum,
fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside
)
trigonometric_operators = (
sin, cos, tan,
)
class YTArray(np.ndarray):
"""
An ndarray subclass that attaches a symbolic unit object to the array data.
Parameters
----------
input_array : :obj:`!iterable`
A tuple, list, or array to attach units to
input_units : String unit specification, unit symbol object, or astropy units
The units of the array. Powers must be specified using python
syntax (cm**3, not cm^3).
registry : ~yt.units.unit_registry.UnitRegistry
The registry to create units from. If input_units is already associated
with a unit registry and this is specified, this will be used instead of
the registry associated with the unit object.
dtype : data-type
The dtype of the array data. Defaults to the dtype of the input data,
or, if none is found, uses np.float64
bypass_validation : boolean
If True, all input validation is skipped. Using this option may produce
corrupted, invalid units or array data, but can lead to significant
speedups when the input validation logic would otherwise add significant overhead. If set,
input_units *must* be a valid unit object. Defaults to False.
Examples
--------
>>> from yt import YTArray
>>> a = YTArray([1, 2, 3], 'cm')
>>> b = YTArray([4, 5, 6], 'm')
>>> a + b
YTArray([ 401., 502., 603.]) cm
>>> b + a
YTArray([ 4.01, 5.02, 6.03]) m
NumPy ufuncs will pass through units where appropriate.
>>> import numpy as np
>>> a = YTArray(np.arange(8) - 4, 'g/cm**3')
>>> np.abs(a)
YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3
and strip them when it would be annoying to deal with them.
>>> np.log10(a)
array([ -inf, 0. , 0.30103 , 0.47712125, 0.60205999,
0.69897 , 0.77815125, 0.84509804])
YTArray is tightly integrated with yt datasets:
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> a = ds.arr(np.ones(5), 'code_length')
>>> a.in_cgs()
YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24,
3.08600000e+24, 3.08600000e+24]) cm
This is equivalent to:
>>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry)
>>> np.all(a == b)
True
"""
_ufunc_registry = {
add: preserve_units,
subtract: preserve_units,
multiply: multiply_units,
divide: divide_units,
logaddexp: return_without_unit,
logaddexp2: return_without_unit,
true_divide: divide_units,
floor_divide: divide_units,
negative: passthrough_unit,
power: power_unit,
remainder: preserve_units,
mod: preserve_units,
fmod: preserve_units,
absolute: passthrough_unit,
fabs: passthrough_unit,
rint: return_without_unit,
sign: return_without_unit,
conj: passthrough_unit,
exp: return_without_unit,
exp2: return_without_unit,
log: return_without_unit,
log2: return_without_unit,
log10: return_without_unit,
expm1: return_without_unit,
log1p: return_without_unit,
sqrt: sqrt_unit,
square: square_unit,
reciprocal: reciprocal_unit,
sin: return_without_unit,
cos: return_without_unit,
tan: return_without_unit,
sinh: return_without_unit,
cosh: return_without_unit,
tanh: return_without_unit,
arcsin: return_without_unit,
arccos: return_without_unit,
arctan: return_without_unit,
arctan2: arctan2_unit,
arcsinh: return_without_unit,
arccosh: return_without_unit,
arctanh: return_without_unit,
hypot: preserve_units,
deg2rad: return_without_unit,
rad2deg: return_without_unit,
bitwise_and: bitop_units,
bitwise_or: bitop_units,
bitwise_xor: bitop_units,
invert: invert_units,
left_shift: bitop_units,
right_shift: bitop_units,
greater: comparison_unit,
greater_equal: comparison_unit,
less: comparison_unit,
less_equal: comparison_unit,
not_equal: comparison_unit,
equal: comparison_unit,
logical_and: comparison_unit,
logical_or: comparison_unit,
logical_xor: comparison_unit,
logical_not: return_without_unit,
maximum: preserve_units,
minimum: preserve_units,
fmax: preserve_units,
fmin: preserve_units,
isreal: return_without_unit,
iscomplex: return_without_unit,
isfinite: return_without_unit,
isinf: return_without_unit,
isnan: return_without_unit,
signbit: return_without_unit,
copysign: passthrough_unit,
nextafter: preserve_units,
modf: passthrough_unit,
ldexp: bitop_units,
frexp: return_without_unit,
floor: passthrough_unit,
ceil: passthrough_unit,
trunc: passthrough_unit,
spacing: passthrough_unit,
positive: passthrough_unit,
divmod_: passthrough_unit,
isnat: return_without_unit,
heaviside: preserve_units,
}
__array_priority__ = 2.0
def __new__(cls, input_array, input_units=None, registry=None, dtype=None,
bypass_validation=False):
if dtype is None:
dtype = getattr(input_array, 'dtype', np.float64)
if bypass_validation is True:
obj = np.asarray(input_array, dtype=dtype).view(cls)
obj.units = input_units
if registry is not None:
obj.units.registry = registry
return obj
if input_array is NotImplemented:
return input_array.view(cls)
if registry is None and isinstance(input_units, (str, bytes)):
if input_units.startswith('code_'):
raise UnitParseError(
"Code units used without referring to a dataset. \n"
"Perhaps you meant to do something like this instead: \n"
"ds.arr(%s, \"%s\")" % (input_array, input_units)
)
if isinstance(input_array, YTArray):
ret = input_array.view(cls)
if input_units is None:
if registry is None:
ret.units = input_array.units
else:
units = Unit(str(input_array.units), registry=registry)
ret.units = units
elif isinstance(input_units, Unit):
ret.units = input_units
else:
ret.units = Unit(input_units, registry=registry)
return ret
elif isinstance(input_array, np.ndarray):
pass
elif iterable(input_array) and input_array:
if isinstance(input_array[0], YTArray):
return YTArray(np.array(input_array, dtype=dtype),
input_array[0].units, registry=registry)
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array, dtype=dtype).view(cls)
# Check units type
if input_units is None:
# Nothing provided. Make dimensionless...
units = Unit()
elif isinstance(input_units, Unit):
if registry and registry is not input_units.registry:
units = Unit(str(input_units), registry=registry)
else:
units = input_units
else:
# units kwarg set, but it's not a Unit object.
# don't handle all the cases here, let the Unit class handle if
# it's a str.
units = Unit(input_units, registry=registry)
# Attach the units
obj.units = units
return obj
def __repr__(self):
"""
"""
return super(YTArray, self).__repr__()+' '+self.units.__repr__()
def __str__(self):
"""
"""
return str(self.view(np.ndarray)) + ' ' + str(self.units)
#
# Start unit conversion methods
#
def convert_to_units(self, units):
"""
Convert the array and units to the given units.
Parameters
----------
units : Unit object or str
The units you want to convert to.
"""
new_units = _unit_repr_check_same(self.units, units)
(conversion_factor, offset) = self.units.get_conversion_factor(new_units)
self.units = new_units
values = self.d
values *= conversion_factor
if offset:
np.subtract(self, offset*self.uq, self)
return self
def convert_to_base(self, unit_system="cgs"):
"""
Convert the array and units to the equivalent base units in
the specified unit system.
Parameters
----------
unit_system : string, optional
The unit system to be used in the conversion. If not specified,
the default base units of cgs are used.
Examples
--------
>>> E = YTQuantity(2.5, "erg/s")
>>> E.convert_to_base(unit_system="galactic")
"""
return self.convert_to_units(self.units.get_base_equivalent(unit_system))
def convert_to_cgs(self):
"""
Convert the array and units to the equivalent cgs units.
"""
return self.convert_to_units(self.units.get_cgs_equivalent())
def convert_to_mks(self):
"""
Convert the array and units to the equivalent mks units.
"""
return self.convert_to_units(self.units.get_mks_equivalent())
def in_units(self, units, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data in the supplied
units, and returns it.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
.. note::
All additional keyword arguments are passed to the
equivalency, which should be used if that particular
equivalency requires them.
Parameters
----------
units : Unit object or string
The units you want to get a new quantity in.
equivalence : string, optional
The equivalence you wish to use. To see which
equivalencies are supported for this unitful
quantity, try the :meth:`list_equivalencies`
method. Default: None
Returns
-------
YTArray
"""
if equivalence is None:
new_units = _unit_repr_check_same(self.units, units)
(conversion_factor, offset) = self.units.get_conversion_factor(new_units)
new_array = type(self)(self.ndview * conversion_factor, new_units)
if offset:
np.subtract(new_array, offset*new_array.uq, new_array)
return new_array
else:
return self.to_equivalent(units, equivalence, **kwargs)
def to(self, units, equivalence=None, **kwargs):
"""
An alias for YTArray.in_units().
See the docstrings of that function for details.
"""
return self.in_units(units, equivalence=equivalence, **kwargs)
def to_value(self, units=None, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data in the supplied
units, and returns it without units. Output is therefore a
bare NumPy array.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
.. note::
All additional keyword arguments are passed to the
equivalency, which should be used if that particular
equivalency requires them.
Parameters
----------
units : Unit object or string, optional
The units you want to get the bare quantity in. If not
specified, the value will be returned in the current units.
equivalence : string, optional
The equivalence you wish to use. To see which
equivalencies are supported for this unitful
quantity, try the :meth:`list_equivalencies`
method. Default: None
Returns
-------
NumPy array
"""
if units is None:
v = self.value
else:
v = self.in_units(units, equivalence=equivalence, **kwargs).value
if isinstance(self, YTQuantity):
return float(v)
else:
return v
def in_base(self, unit_system="cgs"):
"""
Creates a copy of this array with the data in the specified unit system,
and returns it in that system's base units.
Parameters
----------
unit_system : string, optional
The unit system to be used in the conversion. If not specified,
the default base units of cgs are used.
Examples
--------
>>> E = YTQuantity(2.5, "erg/s")
>>> E_new = E.in_base(unit_system="galactic")
"""
return self.in_units(self.units.get_base_equivalent(unit_system))
def in_cgs(self):
"""
Creates a copy of this array with the data in the equivalent cgs units,
and returns it.
Returns
-------
Quantity object with data converted to cgs units.
"""
return self.in_units(self.units.get_cgs_equivalent())
def in_mks(self):
"""
Creates a copy of this array with the data in the equivalent mks units,
and returns it.
Returns
-------
Quantity object with data converted to mks units.
"""
return self.in_units(self.units.get_mks_equivalent())
def to_equivalent(self, unit, equiv, **kwargs):
"""
Convert a YTArray or YTQuantity to an equivalent, e.g., something that is
related by only a constant factor but not in the same units.
Parameters
----------
unit : string
The unit that you wish to convert to.
equiv : string
The equivalence you wish to use. To see which equivalencies are
supported for this unitful quantity, try the
:meth:`list_equivalencies` method.
Examples
--------
>>> a = yt.YTArray(1.0e7,"K")
>>> a.to_equivalent("keV", "thermal")
"""
conv_unit = Unit(unit, registry=self.units.registry)
if self.units.same_dimensions_as(conv_unit):
return self.in_units(conv_unit)
this_equiv = equivalence_registry[equiv]()
oneway_or_equivalent = (
conv_unit.has_equivalent(equiv) or this_equiv._one_way)
if self.has_equivalent(equiv) and oneway_or_equivalent:
new_arr = this_equiv.convert(
self, conv_unit.dimensions, **kwargs)
if isinstance(new_arr, tuple):
try:
return type(self)(new_arr[0], new_arr[1]).in_units(unit)
except YTUnitConversionError:
raise YTInvalidUnitEquivalence(equiv, self.units, unit)
else:
return new_arr.in_units(unit)
else:
raise YTInvalidUnitEquivalence(equiv, self.units, unit)
def list_equivalencies(self):
"""
Lists the possible equivalencies associated with this YTArray or
YTQuantity.
"""
self.units.list_equivalencies()
def has_equivalent(self, equiv):
"""
Check to see if this YTArray or YTQuantity has an equivalent unit in
*equiv*.
"""
return self.units.has_equivalent(equiv)
def ndarray_view(self):
"""
Returns a view into the array, but as an ndarray rather than ytarray.
Returns
-------
View of this array's data.
"""
return self.view(np.ndarray)
def to_ndarray(self):
"""
Creates a copy of this array with the unit information stripped
"""
return np.array(self)
@classmethod
def from_astropy(cls, arr, unit_registry=None):
"""
Convert an AstroPy "Quantity" to a YTArray or YTQuantity.
Parameters
----------
arr : AstroPy Quantity
The Quantity to convert from.
unit_registry : yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
"""
# Converting from AstroPy Quantity
u = arr.unit
ap_units = []
for base, exponent in zip(u.bases, u.powers):
unit_str = base.to_string()
# we have to do this because AstroPy is silly and defines
# hour as "h"
if unit_str == "h": unit_str = "hr"
ap_units.append("%s**(%s)" % (unit_str, Rational(exponent)))
ap_units = "*".join(ap_units)
if isinstance(arr.value, np.ndarray):
return YTArray(arr.value, ap_units, registry=unit_registry)
else:
return YTQuantity(arr.value, ap_units, registry=unit_registry)
def to_astropy(self, **kwargs):
"""
Creates a new AstroPy quantity with the same unit information.
"""
if _astropy.units is None:
raise ImportError("You don't have AstroPy installed, so you can't convert to " +
"an AstroPy quantity.")
return self.value*_astropy.units.Unit(str(self.units), **kwargs)
@classmethod
def from_pint(cls, arr, unit_registry=None):
"""
Convert a Pint "Quantity" to a YTArray or YTQuantity.
Parameters
----------
arr : Pint Quantity
The Quantity to convert from.
unit_registry : yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
Examples
--------
>>> from pint import UnitRegistry
>>> import numpy as np
>>> ureg = UnitRegistry()
>>> a = np.random.random(10)
>>> b = ureg.Quantity(a, "erg/cm**3")
>>> c = yt.YTArray.from_pint(b)
"""
p_units = []
for base, exponent in arr._units.items():
bs = convert_pint_units(base)
p_units.append("%s**(%s)" % (bs, Rational(exponent)))
p_units = "*".join(p_units)
if isinstance(arr.magnitude, np.ndarray):
return YTArray(arr.magnitude, p_units, registry=unit_registry)
else:
return YTQuantity(arr.magnitude, p_units, registry=unit_registry)
def to_pint(self, unit_registry=None):
"""
Convert a YTArray or YTQuantity to a Pint Quantity.
Parameters
----------
arr : YTArray or YTQuantity
The unitful quantity to convert from.
unit_registry : Pint UnitRegistry, optional
The Pint UnitRegistry to use in the conversion. If one is not
supplied, the default one will be used. NOTE: This is not
the same as a yt UnitRegistry object.
Examples
--------
>>> a = YTQuantity(4.0, "cm**2/s")
>>> b = a.to_pint()
"""
from pint import UnitRegistry
if unit_registry is None:
unit_registry = UnitRegistry()
powers_dict = self.units.expr.as_powers_dict()
units = []
for unit, pow in powers_dict.items():
# we have to do this because Pint doesn't recognize
# "yr" as "year"
if str(unit).endswith("yr") and len(str(unit)) in [2,3]:
unit = str(unit).replace("yr","year")
units.append("%s**(%s)" % (unit, Rational(pow)))
units = "*".join(units)
return unit_registry.Quantity(self.value, units)
#
# End unit conversion methods
#
def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
r"""Writes a YTArray to hdf5 file.
Parameters
----------
filename: string
The filename to create and write a dataset to
dataset_name: string
The name of the dataset to create in the file.
info: dictionary
A dictionary of supplementary info to write to append as attributes
to the dataset.
group_name: string
An optional group to write the arrays to. If not specified, the arrays
are datasets at the top level by default.
Examples
--------
>>> a = YTArray([1,2,3], 'cm')
>>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
>>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
... info=myinfo)
"""
from yt.utilities.on_demand_imports import _h5py as h5py
from yt.extern.six.moves import cPickle as pickle
if info is None:
info = {}
info['units'] = str(self.units)
info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut))
if dataset_name is None:
dataset_name = 'array_data'
f = h5py.File(filename)
if group_name is not None:
if group_name in f:
g = f[group_name]
else:
g = f.create_group(group_name)
else:
g = f
if dataset_name in g.keys():
d = g[dataset_name]
# Overwrite without deleting if we can get away with it.
if d.shape == self.shape and d.dtype == self.dtype:
d[...] = self
for k in d.attrs.keys():
del d.attrs[k]
else:
del f[dataset_name]
d = g.create_dataset(dataset_name, data=self)
else:
d = g.create_dataset(dataset_name, data=self)
for k, v in info.items():
d.attrs[k] = v
f.close()
@classmethod
def from_hdf5(cls, filename, dataset_name=None, group_name=None):
r"""Attempts read in and convert a dataset in an hdf5 file into a
YTArray.
Parameters
----------
filename: string
The filename of the hdf5 file.
dataset_name: string
The name of the dataset to read from. If the dataset has a units
attribute, attempt to infer units as well.
group_name: string
An optional group to read the arrays from. If not specified, the
arrays are datasets at the top level by default.
"""
import h5py
from yt.extern.six.moves import cPickle as pickle
if dataset_name is None:
dataset_name = 'array_data'
f = h5py.File(filename)
if group_name is not None:
g = f[group_name]
else:
g = f
dataset = g[dataset_name]
data = dataset[:]
units = dataset.attrs.get('units', '')
if 'unit_registry' in dataset.attrs.keys():
unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring())
else:
unit_lut = None
f.close()
registry = UnitRegistry(lut=unit_lut, add_default_symbols=False)
return cls(data, units, registry=registry)
#
# Start convenience methods
#
@property
def value(self):
"""Get a copy of the array data as a numpy ndarray"""
return np.array(self)
v = value
@property
def ndview(self):
"""Get a view of the array data."""
return self.ndarray_view()
d = ndview
@property
def unit_quantity(self):
"""Get a YTQuantity with the same unit as this array and a value of
1.0"""
return YTQuantity(1.0, self.units)
uq = unit_quantity
@property
def unit_array(self):
"""Get a YTArray filled with ones with the same unit and shape as this
array"""
return np.ones_like(self)
ua = unit_array
def __getitem__(self, item):
ret = super(YTArray, self).__getitem__(item)
if ret.shape == ():
return YTQuantity(ret, self.units, bypass_validation=True)
else:
if hasattr(self, 'units'):
ret.units = self.units
return ret
#
# Start operation methods
#
if LooseVersion(np.__version__) < LooseVersion('1.13.0'):
def __add__(self, right_object):
"""
Add this ytarray to the object on the right of the `+` operator.
Must check for the correct (same dimension) units.
"""
ro = sanitize_units_add(self, right_object, "addition")
return super(YTArray, self).__add__(ro)
def __radd__(self, left_object):
""" See __add__. """
lo = sanitize_units_add(self, left_object, "addition")
return super(YTArray, self).__radd__(lo)
def __iadd__(self, other):
""" See __add__. """
oth = sanitize_units_add(self, other, "addition")
np.add(self, oth, out=self)
return self
def __sub__(self, right_object):
"""
Subtract the object on the right of the `-` from this ytarray. Must
check for the correct (same dimension) units.
"""
ro = sanitize_units_add(self, right_object, "subtraction")
return super(YTArray, self).__sub__(ro)
def __rsub__(self, left_object):
""" See __sub__. """
lo = sanitize_units_add(self, left_object, "subtraction")
return super(YTArray, self).__rsub__(lo)
def __isub__(self, other):
""" See __sub__. """
oth = sanitize_units_add(self, other, "subtraction")
np.subtract(self, oth, out=self)
return self
def __neg__(self):
""" Negate the data. """
return super(YTArray, self).__neg__()
def __mul__(self, right_object):
"""
Multiply this YTArray by the object on the right of the `*`
operator. The unit objects handle being multiplied.
"""
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__mul__(ro)
def __rmul__(self, left_object):
""" See __mul__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rmul__(lo)
def __imul__(self, other):
""" See __mul__. """
oth = sanitize_units_mul(self, other)
np.multiply(self, oth, out=self)
return self
def __div__(self, right_object):
"""
Divide this YTArray by the object on the right of the `/` operator.
"""
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__div__(ro)
def __rdiv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rdiv__(lo)
def __idiv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.divide(self, oth, out=self)
return self
def __truediv__(self, right_object):
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__truediv__(ro)
def __rtruediv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rtruediv__(lo)
def __itruediv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.true_divide(self, oth, out=self)
return self
def __floordiv__(self, right_object):
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__floordiv__(ro)
def __rfloordiv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rfloordiv__(lo)
def __ifloordiv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.floor_divide(self, oth, out=self)
return self
def __or__(self, right_object):
return super(YTArray, self).__or__(right_object)
def __ror__(self, left_object):
return super(YTArray, self).__ror__(left_object)
def __ior__(self, other):
np.bitwise_or(self, other, out=self)
return self
def __xor__(self, right_object):
return super(YTArray, self).__xor__(right_object)
def __rxor__(self, left_object):
return super(YTArray, self).__rxor__(left_object)
def __ixor__(self, other):
np.bitwise_xor(self, other, out=self)
return self
def __and__(self, right_object):
return super(YTArray, self).__and__(right_object)
def __rand__(self, left_object):
return super(YTArray, self).__rand__(left_object)
def __iand__(self, other):
np.bitwise_and(self, other, out=self)
return self
def __pow__(self, power):
"""
Raise this YTArray to some power.
Parameters
----------
power : float or dimensionless YTArray.
The power to raise this YTArray to.
"""
if isinstance(power, YTArray):
if not power.units.is_dimensionless:
raise YTUnitOperationError('power', power.units)
# Work around a sympy issue (I think?)
#
# If I don't do this, super(YTArray, self).__pow__ returns a YTArray
# with a unit attribute set to the sympy expression 1/1 rather than
# a dimensionless Unit object.
if self.units.is_dimensionless and power == -1:
ret = super(YTArray, self).__pow__(power)
return type(self)(ret, input_units='')
return super(YTArray, self).__pow__(power)
def __abs__(self):
""" Return a YTArray with the abs of the data. """
return super(YTArray, self).__abs__()
#
# Start comparison operators.
#
def __lt__(self, other):
""" Test if this is less than the object on the right. """
# converts if possible
oth = validate_comparison_units(self, other, 'less_than')
return super(YTArray, self).__lt__(oth)
def __le__(self, other):
"""Test if this is less than or equal to the object on the right.
"""
oth = validate_comparison_units(self, other, 'less_than or equal')
return super(YTArray, self).__le__(oth)
def __eq__(self, other):
""" Test if this is equal to the object on the right. """
# Check that other is a YTArray.
if other is None:
# self is a YTArray, so it can't be None.
return False
oth = validate_comparison_units(self, other, 'equal')
return super(YTArray, self).__eq__(oth)
def __ne__(self, other):
""" Test if this is not equal to the object on the right. """
# Check that the other is a YTArray.
if other is None:
return True
oth = validate_comparison_units(self, other, 'not equal')
return super(YTArray, self).__ne__(oth)
def __ge__(self, other):
""" Test if this is greater than or equal to other. """
# Check that the other is a YTArray.
oth = validate_comparison_units(
self, other, 'greater than or equal')
return super(YTArray, self).__ge__(oth)
def __gt__(self, other):
""" Test if this is greater than the object on the right. """
# Check that the other is a YTArray.
oth = validate_comparison_units(self, other, 'greater than')
return super(YTArray, self).__gt__(oth)
#
# End comparison operators
#
#
# Begin reduction operators
#
@return_arr
def prod(self, axis=None, dtype=None, out=None):
if axis is not None:
units = self.units**self.shape[axis]
else:
units = self.units**self.size
return super(YTArray, self).prod(axis, dtype, out), units
@return_arr
def mean(self, axis=None, dtype=None, out=None):
return super(YTArray, self).mean(axis, dtype, out), self.units
@return_arr
def sum(self, axis=None, dtype=None, out=None):
return super(YTArray, self).sum(axis, dtype, out), self.units
@return_arr
def std(self, axis=None, dtype=None, out=None, ddof=0):
return super(YTArray, self).std(axis, dtype, out, ddof), self.units
def __array_wrap__(self, out_arr, context=None):
ret = super(YTArray, self).__array_wrap__(out_arr, context)
if isinstance(ret, YTQuantity) and ret.shape != ():
ret = ret.view(YTArray)
if context is None:
if ret.shape == ():
return ret[()]
else:
return ret
ufunc = context[0]
inputs = context[1]
if ufunc in unary_operators:
out_arr, inp, u = get_inp_u_unary(ufunc, inputs, out_arr)
unit = self._ufunc_registry[context[0]](u)
ret_class = type(self)
elif ufunc in binary_operators:
unit_operator = self._ufunc_registry[context[0]]
inps, units, ret_class = get_inp_u_binary(ufunc, inputs)
if unit_operator in (preserve_units, comparison_unit,
arctan2_unit):
inps, units = handle_comparison_units(
inps, units, ufunc, ret_class, raise_error=True)
unit = unit_operator(*units)
if unit_operator in (multiply_units, divide_units):
out_arr, out_arr, unit = handle_multiply_divide_units(
unit, units, out_arr, out_arr)
else:
raise RuntimeError(
"Support for the %s ufunc has not been added "
"to YTArray." % str(context[0]))
if unit is None:
out_arr = np.array(out_arr, copy=False)
return out_arr
out_arr.units = unit
if out_arr.size == 1:
return YTQuantity(np.array(out_arr), unit)
else:
if ret_class is YTQuantity:
# This happens if you do ndarray * YTQuantity. Explicitly
# casting to YTArray avoids creating a YTQuantity with
# size > 1
return YTArray(np.array(out_arr), unit)
return ret_class(np.array(out_arr, copy=False), unit)
else: # numpy version equal to or newer than 1.13
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
func = getattr(ufunc, method)
if 'out' in kwargs:
out_orig = kwargs.pop('out')
out = np.asarray(out_orig[0])
else:
out = None
if len(inputs) == 1:
_, inp, u = get_inp_u_unary(ufunc, inputs)
out_arr = func(np.asarray(inp), out=out, **kwargs)
if ufunc in (multiply, divide) and method == 'reduce':
power_sign = POWER_SIGN_MAPPING[ufunc]
if 'axis' in kwargs and kwargs['axis'] is not None:
unit = u**(power_sign*inp.shape[kwargs['axis']])
else:
unit = u**(power_sign*inp.size)
else:
unit = self._ufunc_registry[ufunc](u)
ret_class = type(self)
elif len(inputs) == 2:
unit_operator = self._ufunc_registry[ufunc]
inps, units, ret_class = get_inp_u_binary(ufunc, inputs)
if unit_operator in (comparison_unit, arctan2_unit):
inps, units = handle_comparison_units(
inps, units, ufunc, ret_class)
elif unit_operator is preserve_units:
inps, units = handle_preserve_units(
inps, units, ufunc, ret_class)
unit = unit_operator(*units)
out_arr = func(np.asarray(inps[0]), np.asarray(inps[1]),
out=out, **kwargs)
if unit_operator in (multiply_units, divide_units):
out, out_arr, unit = handle_multiply_divide_units(
unit, units, out, out_arr)
else:
raise RuntimeError(
"Support for the %s ufunc with %i inputs has not been"
"added to YTArray." % (str(ufunc), len(inputs)))
if unit is None:
out_arr = np.array(out_arr, copy=False)
elif ufunc in (modf, divmod_):
out_arr = tuple((ret_class(o, unit) for o in out_arr))
elif out_arr.size == 1:
out_arr = YTQuantity(np.asarray(out_arr), unit)
else:
if ret_class is YTQuantity:
# This happens if you do ndarray * YTQuantity. Explicitly
# casting to YTArray avoids creating a YTQuantity with
# size > 1
out_arr = YTArray(np.asarray(out_arr), unit)
else:
out_arr = ret_class(np.asarray(out_arr), unit)
if out is not None:
out_orig[0].flat[:] = out.flat[:]
if isinstance(out_orig[0], YTArray):
out_orig[0].units = unit
return out_arr
def copy(self, order='C'):
return type(self)(np.copy(np.asarray(self)), self.units)
def __array_finalize__(self, obj):
if obj is None and hasattr(self, 'units'):
return
self.units = getattr(obj, 'units', NULL_UNIT)
def __pos__(self):
""" Posify the data. """
# this needs to be defined for all numpy versions, see
# numpy issue #9081
return type(self)(super(YTArray, self).__pos__(), self.units)
@return_arr
def dot(self, b, out=None):
return super(YTArray, self).dot(b), self.units*b.units
def __reduce__(self):
"""Pickle reduction method
See the documentation for the standard library pickle module:
http://docs.python.org/2/library/pickle.html
Unit metadata is encoded in the zeroth element of the third element of the
returned tuple, itself a tuple used to restore the state of the ndarray.
This is always defined for numpy arrays.
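Examples
--------
>>> import pickle
>>> a = YTArray([1, 2, 3], 'cm')
>>> b = pickle.loads(pickle.dumps(a))
>>> np.all(a == b)
True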
"""
np_ret = super(YTArray, self).__reduce__()
obj_state = np_ret[2]
unit_state = (((str(self.units), self.units.registry.lut),) + obj_state[:],)
new_ret = np_ret[:2] + unit_state + np_ret[3:]
return new_ret
def __setstate__(self, state):
"""Pickle setstate method
This is called inside pickle.load() and restores the unit data from the
metadata extracted in __reduce__ and then serialized by pickle.
"""
super(YTArray, self).__setstate__(state[1:])
try:
unit, lut = state[0]
except TypeError:
# this case happens when we try to load an old pickle file
# created before we serialized the unit symbol lookup table
# into the pickle file
unit, lut = str(state[0]), default_unit_symbol_lut.copy()
# need to fix up the lut if the pickle was saved prior to PR #1728
# when the pickle format changed
if len(lut['m']) == 2:
lut.update(default_unit_symbol_lut)
for k, v in [(k, v) for k, v in lut.items() if len(v) == 2]:
lut[k] = v + (0.0, r'\rm{' + k.replace('_', '\ ') + '}')
registry = UnitRegistry(lut=lut, add_default_symbols=False)
self.units = Unit(unit, registry=registry)
def __deepcopy__(self, memodict=None):
"""copy.deepcopy implementation
This is necessary for stdlib deepcopy of arrays and quantities.
"""
if memodict is None:
memodict = {}
ret = super(YTArray, self).__deepcopy__(memodict)
return type(self)(ret, copy.deepcopy(self.units))
class YTQuantity(YTArray):
"""
A scalar associated with a unit.
Parameters
----------
input_scalar : an integer or floating point scalar
The scalar to attach units to
input_units : String unit specification, unit symbol object, or astropy units
The units of the quantity. Powers must be specified using python syntax
(cm**3, not cm^3).
registry : A UnitRegistry object
The registry to create units from. If input_units is already associated
with a unit registry and this is specified, this will be used instead of
the registry associated with the unit object.
dtype : data-type
The dtype of the array data.
Examples
--------
>>> from yt import YTQuantity
>>> a = YTQuantity(1, 'cm')
>>> b = YTQuantity(2, 'm')
>>> a + b
201.0 cm
>>> b + a
2.01 m
NumPy ufuncs will pass through units where appropriate.
>>> import numpy as np
>>> a = YTQuantity(12, 'g/cm**3')
>>> np.abs(a)
12 g/cm**3
and strip them when it would be annoying to deal with them.
>>> print(np.log10(a))
1.07918124605
YTQuantity is tightly integrated with yt datasets:
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> a = ds.quan(5, 'code_length')
>>> a.in_cgs()
1.543e+25 cm
This is equivalent to:
>>> b = YTQuantity(5, 'code_length', registry=ds.unit_registry)
>>> np.all(a == b)
True
"""
def __new__(cls, input_scalar, input_units=None, registry=None,
dtype=np.float64, bypass_validation=False):
if not isinstance(input_scalar, (numeric_type, np.number, np.ndarray)):
raise RuntimeError("YTQuantity values must be numeric")
ret = YTArray.__new__(cls, input_scalar, input_units, registry,
dtype=dtype, bypass_validation=bypass_validation)
if ret.size > 1:
raise RuntimeError("YTQuantity instances must be scalars")
return ret
def __repr__(self):
return str(self)
def validate_numpy_wrapper_units(v, arrs):
if not any(isinstance(a, YTArray) for a in arrs):
return v
if not all(isinstance(a, YTArray) for a in arrs):
raise RuntimeError("Not all of your arrays are YTArrays.")
a1 = arrs[0]
if not all(a.units == a1.units for a in arrs[1:]):
raise RuntimeError("Your arrays must have identical units.")
v.units = a1.units
return v
def uconcatenate(arrs, axis=0):
"""Concatenate a sequence of arrays.
This wrapper around numpy.concatenate preserves units. All input arrays must
have the same units. See the documentation of numpy.concatenate for full
details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uconcatenate((A, B))
YTArray([ 1., 2., 3., 2., 3., 4.]) cm
"""
v = np.concatenate(arrs, axis=axis)
v = validate_numpy_wrapper_units(v, arrs)
return v
def ucross(arr1, arr2, registry=None, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""Applies the cross product to two YT arrays.
This wrapper around numpy.cross preserves units.
See the documentation of numpy.cross for full
details.
"""
v = np.cross(arr1, arr2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)
units = arr1.units * arr2.units
arr = YTArray(v, units, registry=registry)
return arr
def uintersect1d(arr1, arr2, assume_unique=False):
"""Find the sorted unique elements of the two input arrays.
A wrapper around numpy.intersect1d that preserves units. All input arrays
must have the same units. See the documentation of numpy.intersect1d for
full details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uintersect1d(A, B)
YTArray([ 2., 3.]) cm
"""
v = np.intersect1d(arr1, arr2, assume_unique=assume_unique)
v = validate_numpy_wrapper_units(v, [arr1, arr2])
return v
def uunion1d(arr1, arr2):
"""Find the union of two arrays.
A wrapper around numpy.union1d that preserves units. All input arrays
must have the same units. See the documentation of numpy.union1d for
full details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uunion1d(A, B)
YTArray([ 1., 2., 3., 4.]) cm
"""
v = np.union1d(arr1, arr2)
v = validate_numpy_wrapper_units(v, [arr1, arr2])
return v
def unorm(data, ord=None, axis=None, keepdims=False):
"""Matrix or vector norm that preserves units
This is a wrapper around np.linalg.norm that preserves units. See
the documentation for that function for descriptions of the keyword
arguments.
The keepdims argument is ignored if the version of numpy installed is
older than numpy 1.10.0.
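Examples
--------
>>> a = yt.YTArray([3.0, 4.0], 'cm')
>>> unorm(a)
5.0 cm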
"""
if LooseVersion(np.__version__) < LooseVersion('1.10.0'):
norm = np.linalg.norm(data, ord=ord, axis=axis)
else:
norm = np.linalg.norm(data, ord=ord, axis=axis, keepdims=keepdims)
if norm.shape == ():
return YTQuantity(norm, data.units)
return YTArray(norm, data.units)
def udot(op1, op2):
"""Matrix or vector dot product that preserves units
This is a wrapper around np.dot that preserves units.
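Examples
--------
>>> a = yt.YTArray([1.0, 2.0], 'cm')
>>> b = yt.YTArray([3.0, 4.0], 'cm')
>>> udot(a, b)
11.0 cm**2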
"""
dot = np.dot(op1.d, op2.d)
units = op1.units*op2.units
if dot.shape == ():
return YTQuantity(dot, units)
return YTArray(dot, units)
def uvstack(arrs):
"""Stack arrays in sequence vertically (row wise) while preserving units
This is a wrapper around np.vstack that preserves units.
"""
v = np.vstack(arrs)
v = validate_numpy_wrapper_units(v, arrs)
return v
def uhstack(arrs):
"""Stack arrays in sequence horizontally (column wise) while preserving units
This is a wrapper around np.hstack that preserves units.
"""
v = np.hstack(arrs)
v = validate_numpy_wrapper_units(v, arrs)
return v
def ustack(arrs, axis=0):
"""Join a sequence of arrays along a new axis while preserving units
The axis parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the
first dimension and if ``axis=-1`` it will be the last dimension.
This is a wrapper around np.stack that preserves units.
"""
v = np.stack(arrs, axis=axis)
v = validate_numpy_wrapper_units(v, arrs)
return v
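# Hedged sketch of the stacking wrappers above; inputs must share units, which
# validate_numpy_wrapper_units enforces, and the shapes below are the usual
# numpy vstack/hstack/stack results.
def _example_stacking():
    a = YTArray([1.0, 2.0], 'km')
    b = YTArray([3.0, 4.0], 'km')
    print(uvstack([a, b]).shape)         # (2, 2)
    print(uhstack([a, b]).shape)         # (4,)
    print(ustack([a, b], axis=0).shape)  # (2, 2)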
def array_like_field(data, x, field):
field = data._determine_fields(field)[0]
if isinstance(field, tuple):
finfo = data.ds._get_field_info(field[0],field[1])
else:
finfo = data.ds._get_field_info(field)
if finfo.sampling_type == 'particle':
units = finfo.output_units
else:
units = finfo.units
if isinstance(x, YTArray):
arr = copy.deepcopy(x)
arr.convert_to_units(units)
return arr
if isinstance(x, np.ndarray):
return data.ds.arr(x, units)
else:
return data.ds.quan(x, units)
def get_binary_op_return_class(cls1, cls2):
if cls1 is cls2:
return cls1
if cls1 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls1, (numeric_type, np.number, list, tuple)):
return cls2
if cls2 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls2, (numeric_type, np.number, list, tuple)):
return cls1
if issubclass(cls1, YTQuantity):
return cls2
if issubclass(cls2, YTQuantity):
return cls1
if issubclass(cls1, cls2):
return cls1
if issubclass(cls2, cls1):
return cls2
else:
raise RuntimeError("Undefined operation for a YTArray subclass. "
"Received operand types (%s) and (%s)" % (cls1, cls2))
def loadtxt(fname, dtype='float', delimiter='\t', usecols=None, comments='#'):
r"""
Load YTArrays with unit information from a text file. Each row in the
text file must have the same number of values.
Parameters
----------
fname : str
Filename to read.
dtype : data-type, optional
Data-type of the resulting array; default: float.
delimiter : str, optional
The string used to separate values. The default is a tab
character ('\t').
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
Examples
--------
>>> temp, velx = yt.loadtxt("sphere.dat", usecols=(1,2), delimiter="\t")
"""
f = open(fname, 'r')
next_one = False
units = []
num_cols = -1
for line in f.readlines():
words = line.strip().split()
if len(words) == 0:
continue
if line[0] == comments:
if next_one:
units = words[1:]
if len(words) == 2 and words[1] == "Units":
next_one = True
else:
# Here we catch the first line of numbers
try:
col_words = line.strip().split(delimiter)
for word in col_words:
float(word)
num_cols = len(col_words)
break
except ValueError:
mylog.warning("Unrecognized character at beginning of line: \"%s\"." % line[0])
f.close()
if len(units) != num_cols:
mylog.warning("Malformed or incomplete units header. Arrays will be "
"dimensionless!")
units = ["dimensionless"]*num_cols
arrays = np.loadtxt(fname, dtype=dtype, comments=comments,
delimiter=delimiter, converters=None,
unpack=True, usecols=usecols, ndmin=0)
if usecols is not None:
units = [units[col] for col in usecols]
mylog.info("Array units: %s" % ", ".join(units))
return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)])
def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', header='',
footer='', comments='#'):
r"""
Write YTArrays with unit information to a text file.
Parameters
----------
fname : str
The file to write the YTArrays to.
arrays : list of YTArrays or single YTArray
The array(s) to write to the file.
fmt : str or sequence of strs, optional
A single format (%10.5f), or a sequence of formats.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file, before the
unit header.
footer : str, optional
String that will be written at the end of the file.
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '#', as expected by e.g.
``yt.loadtxt``.
Examples
--------
>>> sp = ds.sphere("c", (100,"kpc"))
>>> a = sp["density"]
>>> b = sp["temperature"]
>>> c = sp["velocity_x"]
>>> yt.savetxt("sphere.dat", [a,b,c], header='My sphere stuff', delimiter="\t")
"""
if not isinstance(arrays, list):
arrays = [arrays]
units = []
for array in arrays:
if hasattr(array, "units"):
units.append(str(array.units))
else:
units.append("dimensionless")
if header != '':
header += '\n'
header += " Units\n " + '\t'.join(units)
np.savetxt(fname, np.transpose(arrays), header=header,
fmt=fmt, delimiter=delimiter, footer=footer,
newline='\n', comments=comments)
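# Hedged round-trip sketch for savetxt/loadtxt above; "example.dat" is a
# throwaway file name and the column data are made up.
def _example_text_roundtrip():
    dens = YTArray([1.0, 2.0, 3.0], 'g/cm**3')
    temp = YTArray([4.0, 5.0, 6.0], 'K')
    savetxt("example.dat", [dens, temp], header='example columns')
    d, t = loadtxt("example.dat")
    print(d.units, t.units)  # g/cm**3 K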
| 35.106849
| 119
| 0.592836
| 7,991
| 64,070
| 4.584908
| 0.10662
| 0.0113
| 0.017905
| 0.019815
| 0.411185
| 0.36918
| 0.32005
| 0.270975
| 0.240024
| 0.217288
| 0
| 0.013325
| 0.307726
| 64,070
| 1,824
| 120
| 35.126096
| 0.812711
| 0.282129
| 0
| 0.236196
| 0
| 0
| 0.029236
| 0
| 0.001022
| 0
| 0
| 0
| 0
| 1
| 0.122699
| false
| 0.02045
| 0.026585
| 0.023517
| 0.312883
| 0.001022
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a306266dca5739cfacd9015b52dba19c79b8c41
| 1,548
|
py
|
Python
|
src/posts/api/serializers.py
|
MahmoudMagdi20/django_rest_blog_api
|
e1969c75e20b4d807baf26051924a0b99a23b4dc
|
[
"MIT"
] | null | null | null |
src/posts/api/serializers.py
|
MahmoudMagdi20/django_rest_blog_api
|
e1969c75e20b4d807baf26051924a0b99a23b4dc
|
[
"MIT"
] | null | null | null |
src/posts/api/serializers.py
|
MahmoudMagdi20/django_rest_blog_api
|
e1969c75e20b4d807baf26051924a0b99a23b4dc
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from posts.models import Post
class PostCreateUpdateSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = [
#'id',
'title',
#'slug',
'content',
'publish',
]
post_detail_url = serializers.HyperlinkedIdentityField(
view_name='posts-api:detail',
lookup_field='slug',
)
class PostDetailSerializer(serializers.ModelSerializer):
url = post_detail_url
user = serializers.SerializerMethodField()
image = serializers.SerializerMethodField()
html = serializers.SerializerMethodField()
class Meta:
model = Post
fields = [
'url',
'id',
'title',
'slug',
'content',
'publish',
'user',
'image',
'html',
]
def get_html(self, obj):
return obj.get_markdown()
def get_user(self, obj):
return str(obj.user.username)
def get_image(self, obj):
try:
image = obj.image.url
except Exception:
# Accessing .url on an empty ImageField raises ValueError; fall back to None.
image = None
return image
class PostListSerializer(serializers.ModelSerializer):
url = post_detail_url
user = serializers.SerializerMethodField()
class Meta:
model = Post
fields = [
'url',
'user',
'title',
'content',
'publish',
]
def get_user(self, obj):
return str(obj.user.username)
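# Hedged usage sketch (not from the original file): wiring PostListSerializer
# into a read-only DRF view. The view name is illustrative; the request
# context DRF injects lets HyperlinkedIdentityField build absolute URLs.
from rest_framework.generics import ListAPIView

class PostListAPIView(ListAPIView):
    queryset = Post.objects.all()
    serializer_class = PostListSerializer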
| 22.434783
| 62
| 0.541344
| 133
| 1,548
| 6.195489
| 0.330827
| 0.15534
| 0.050971
| 0.065534
| 0.48301
| 0.393204
| 0.393204
| 0.393204
| 0.288835
| 0.099515
| 0
| 0
| 0.360465
| 1,548
| 68
| 63
| 22.764706
| 0.832323
| 0.007752
| 0
| 0.535714
| 0
| 0
| 0.0691
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.035714
| 0.053571
| 0.392857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a31cb53c607d4ae46c2c3f0ae523a2030f68afc
| 1,085
|
py
|
Python
|
Protheus_WebApp/Modules/SIGAGTP/GTPA036ETestCase.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 17
|
2018-09-24T17:27:08.000Z
|
2021-09-16T19:09:46.000Z
|
Protheus_WebApp/Modules/SIGAGTP/GTPA036ETestCase.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 4
|
2018-09-24T17:30:32.000Z
|
2022-01-03T11:39:30.000Z
|
Protheus_WebApp/Modules/SIGAGTP/GTPA036ETestCase.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 18
|
2019-06-07T17:41:34.000Z
|
2022-01-31T18:17:31.000Z
|
from tir import Webapp
import unittest
class GTPA036E(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup("SIGAGTP", "05/08/2020", "T1", "D MG 01 ")
inst.oHelper.Program('GTPA036')
def test_GTPA036E_CT001(self):
self.oHelper.SetButton('Avançar')
self.oHelper.ClickLabel("Arquivo não formatado")
self.oHelper.SetButton('Avançar')
self.oHelper.SetValue('XXX_DATADE', '02/08/2020')
self.oHelper.SetValue('XXX_DATATE', '07/08/2020')
self.oHelper.ScrollGrid(column='Agência', match_value='000048', grid_number=1)
# self.oHelper.ClickGridCell("", row=2, grid_number=1)
self.oHelper.ClickBox("", contents_list='', select_all=False, grid_number=1)
self.oHelper.SetButton('Concluir')
self.oHelper.SetButton('Fechar')
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
| 35
| 94
| 0.62765
| 120
| 1,085
| 5.525
| 0.541667
| 0.182504
| 0.120664
| 0.067873
| 0.214178
| 0.11463
| 0
| 0
| 0
| 0
| 0
| 0.058753
| 0.231336
| 1,085
| 30
| 95
| 36.166667
| 0.736211
| 0
| 0
| 0.166667
| 0
| 0
| 0.140214
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 1
| 0.125
| false
| 0
| 0.083333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a32bc170cadd36fc1306d343ea0e49f3379160d
| 1,654
|
py
|
Python
|
src/collectors/heartbeat/heartbeat.py
|
art19/netuitive-diamond
|
57f61f2444e6f3d3692b4ee989415939bfaa932e
|
[
"MIT"
] | 2
|
2016-11-17T13:17:50.000Z
|
2017-03-28T19:42:04.000Z
|
src/collectors/heartbeat/heartbeat.py
|
art19/netuitive-diamond
|
57f61f2444e6f3d3692b4ee989415939bfaa932e
|
[
"MIT"
] | 62
|
2016-09-30T14:04:52.000Z
|
2021-04-22T21:22:28.000Z
|
src/collectors/heartbeat/heartbeat.py
|
art19/netuitive-diamond
|
57f61f2444e6f3d3692b4ee989415939bfaa932e
|
[
"MIT"
] | 4
|
2017-01-24T14:44:56.000Z
|
2021-03-03T17:14:19.000Z
|
# coding=utf-8
"""
Send a value of 1 as a heartbeat every time this collector is invoked.
#### Dependencies
None
#### Usage
Add the collector config as :
enabled = True
path = netuitive
Metrics are collected as :
- metrics.heartbeat
Netuitive Change History
========================
DVG 2016/11/14 Initial version.
"""
import diamond.collector
from diamond.utils.config import load_config as load_server_config
try:
import netuitive
except ImportError:
netuitive = None
class HeartbeatCollector(diamond.collector.Collector):
def __init__(self, *args, **kwargs):
super(HeartbeatCollector, self).__init__(*args, **kwargs)
self.hostname = self.get_hostname()
self.ttl = self.config['ttl']
self.connection_timeout = 5
if not netuitive:
self.log.error('netuitive import failed. Heartbeat collector disabled')
self.enabled = False
return
try:
self.version = self._get_version()
if 'netuitive_connection_timeout' in self.config:
self.connection_timeout = int(self.config['netuitive_connection_timeout'])
self.api = netuitive.Client(url=self.config['netuitive_url'],
api_key=self.config['netuitive_api_key'],
agent=self.version,
connection_timeout=self.connection_timeout)
except Exception as e:
self.log.debug(e)
def collect(self):
check = netuitive.Check('heartbeat', self.hostname, self.ttl)
self.api.post_check(check)
| 25.84375
| 90
| 0.615478
| 181
| 1,654
| 5.475138
| 0.447514
| 0.102926
| 0.063572
| 0.038345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009267
| 0.282346
| 1,654
| 63
| 91
| 26.253968
| 0.825611
| 0.198307
| 0
| 0.068966
| 0
| 0
| 0.114655
| 0.042521
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.172414
| 0
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a33b4fb181d675d2537be4a920a504933aa3c82
| 6,599
|
py
|
Python
|
process_script/stat.py
|
vitorebatista/AVEMH
|
1c0bea3ae6c35729b80ba49b9663ce83ea43922d
|
[
"MIT"
] | 2
|
2020-11-11T14:02:53.000Z
|
2020-12-10T00:10:50.000Z
|
process_script/stat.py
|
vitorebatista/AVEMH
|
1c0bea3ae6c35729b80ba49b9663ce83ea43922d
|
[
"MIT"
] | null | null | null |
process_script/stat.py
|
vitorebatista/AVEMH
|
1c0bea3ae6c35729b80ba49b9663ce83ea43922d
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import sys
markets = ["hangseng", "dax", "ftse", "sp", "nikkei"]
market = markets[int(sys.argv[1])-1]
# read GD data file
dat = pd.read_csv("./num_res/{}.GD.csv".format(market))
# split into two experiments
exp1_GD = dat[dat.columns[:5]]
exp2_GD = dat[dat.columns[5:]]
# calculate statistics
stat1_GD = pd.DataFrame([exp1_GD.min(), exp1_GD.median(), exp1_GD.std()])
stat1_GD.index = ["Best", "Median", "Std."]
stat2_GD = pd.DataFrame([exp2_GD.min(), exp2_GD.median(), exp2_GD.std()])
stat2_GD.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_GD = stat1_GD.loc["Median"].sort_values()
best1_GD = list(meds1_GD.index[:2])
meds2_GD = stat2_GD.loc["Median"].sort_values()
best2_GD = list(meds2_GD.index[:2])
print("{}.GD:".format(market), best1_GD[0], best1_GD[1])
# print("{}.GD:".format(market), best2_GD[0], best2_GD[1]) # TODO: check error
# read Spacing data file
dat = pd.read_csv("./num_res/{}.Spacing.csv".format(market))
# split into two experiments
exp1_Spacing = dat[dat.columns[:5]]
exp2_Spacing = dat[dat.columns[5:]]
# calculate statistics
stat1_Spacing = pd.DataFrame(
[exp1_Spacing.min(), exp1_Spacing.median(), exp1_Spacing.std()])
stat1_Spacing.index = ["Best", "Median", "Std."]
stat2_Spacing = pd.DataFrame(
[exp2_Spacing.min(), exp2_Spacing.median(), exp2_Spacing.std()])
stat2_Spacing.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_Spacing = stat1_Spacing.loc["Median"].sort_values()
best1_Spacing = list(meds1_Spacing.index[:2])
meds2_Spacing = stat2_Spacing.loc["Median"].sort_values()
best2_Spacing = list(meds2_Spacing.index[:2])
print("{}.Spacing:".format(market), best1_Spacing[0], best1_Spacing[1])
# print("{}.Spacing:".format(market), best2_Spacing[0], best2_Spacing[1]) # TODO: check error
# read MaxSpread data file
dat = pd.read_csv("./num_res/{}.MaxSpread.csv".format(market))
# split into two experiments
exp1_MaxSpread = dat[dat.columns[:5]]
exp2_MaxSpread = dat[dat.columns[5:]]
# calculate statistics
stat1_MaxSpread = pd.DataFrame(
[exp1_MaxSpread.max(), exp1_MaxSpread.median(), exp1_MaxSpread.std()])
stat1_MaxSpread.index = ["Best", "Median", "Std."]
stat2_MaxSpread = pd.DataFrame(
[exp2_MaxSpread.max(), exp2_MaxSpread.median(), exp2_MaxSpread.std()])
stat2_MaxSpread.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_MaxSpread = stat1_MaxSpread.loc["Median"].sort_values(ascending=False)
best1_MaxSpread = list(meds1_MaxSpread.index[:2])
meds2_MaxSpread = stat2_MaxSpread.loc["Median"].sort_values(ascending=False)
best2_MaxSpread = list(meds2_MaxSpread.index[:2])
print("{}.MaxSpread:".format(market), best1_MaxSpread[0], best1_MaxSpread[1])
# print("{}.MaxSpread:".format(market), best2_MaxSpread[0], best2_MaxSpread[1]) # TODO: check error
# read Delta data file
dat = pd.read_csv("./num_res/{}.Delta.csv".format(market))
# split into two experiments
exp1_Delta = dat[dat.columns[:5]]
exp2_Delta = dat[dat.columns[5:]]
# calculate statistics
stat1_Delta = pd.DataFrame(
[exp1_Delta.min(), exp1_Delta.median(), exp1_Delta.std()])
stat1_Delta.index = ["Best", "Median", "Std."]
stat2_Delta = pd.DataFrame(
[exp2_Delta.min(), exp2_Delta.median(), exp2_Delta.std()])
stat2_Delta.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_Delta = stat1_Delta.loc["Median"].sort_values()
best1_Delta = list(meds1_Delta.index[:2])
meds2_Delta = stat2_Delta.loc["Median"].sort_values()
best2_Delta = list(meds2_Delta.index[:2])
print("{}.Delta:".format(market), best1_Delta[0], best1_Delta[1])
# print("{}.Delta:".format(market), best2_Delta[0], best2_Delta[1]) # TODO: check error
# read IGD data file
dat = pd.read_csv("./num_res/{}.IGD.csv".format(market))
# split into two experiments
exp1_IGD = dat[dat.columns[:5]]
exp2_IGD = dat[dat.columns[5:]]
# calculate statistics
stat1_IGD = pd.DataFrame([exp1_IGD.min(), exp1_IGD.median(), exp1_IGD.std()])
stat1_IGD.index = ["Best", "Median", "Std."]
stat2_IGD = pd.DataFrame([exp2_IGD.min(), exp2_IGD.median(), exp2_IGD.std()])
stat2_IGD.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_IGD = stat1_IGD.loc["Median"].sort_values()
best1_IGD = list(meds1_IGD.index[:2])
meds2_IGD = stat2_IGD.loc["Median"].sort_values()
best2_IGD = list(meds2_IGD.index[:2])
print("{}.IGD:".format(market), best1_IGD[0], best1_IGD[1])
# print("{}.IGD:".format(market), best2_IGD[0], best2_IGD[1]) # TODO: check error
# read Hypervolume data file
dat = pd.read_csv("./num_res/{}.Hypervolume.csv".format(market))
# split into two experiments
exp1_Hypervolume = dat[dat.columns[:5]]
exp2_Hypervolume = dat[dat.columns[5:]]
# calculate statistics
stat1_Hypervolume = pd.DataFrame(
[exp1_Hypervolume.max(), exp1_Hypervolume.median(), exp1_Hypervolume.std()])
stat1_Hypervolume.index = ["Best", "Median", "Std."]
stat2_Hypervolume = pd.DataFrame(
[exp2_Hypervolume.max(), exp2_Hypervolume.median(), exp2_Hypervolume.std()])
stat2_Hypervolume.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_Hypervolume = stat1_Hypervolume.loc["Median"].sort_values(
ascending=False)
best1_Hypervolume = list(meds1_Hypervolume.index[:2])
meds2_Hypervolume = stat2_Hypervolume.loc["Median"].sort_values(
ascending=False)
best2_Hypervolume = list(meds2_Hypervolume.index[:2])
print("{}.Hypervolume:".format(market),
best1_Hypervolume[0], best1_Hypervolume[1])
# print("{}.Hypervolume:".format(market),
# best2_Hypervolume[0], best2_Hypervolume[1]) # TODO: check error
print("{}\n----------------------------------------------".format(market))
pd.options.display.float_format = '{:.2e}'.format
stat1_overall = pd.concat(
[stat1_GD, stat1_Spacing, stat1_MaxSpread, stat1_Delta, stat1_IGD, stat1_Hypervolume])
stat2_overall = pd.concat(
[stat2_GD, stat2_Spacing, stat2_MaxSpread, stat2_Delta, stat2_IGD, stat2_Hypervolume])
arrays = [["GD", "GD", "GD", "Spacing", "Spacing", "Spacing", "MaxSpread", "MaxSpread", "MaxSpread",
"Delta", "Delta", "Delta", "IGD", "IGD", "IGD", "Hypervolume", "Hypervolume", "Hypervolume"],
stat1_overall.index
]
index = pd.MultiIndex.from_arrays(arrays, names=["Metric", ""])
stat1_overall.index = index
stat2_overall.index = index
print(stat1_overall)
print("----------------------------------------------")
print(stat2_overall)
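# Hedged refactoring sketch (not part of the original script): each metric
# block above repeats the same read/summarize/rank steps, differing only in
# whether higher is better. A helper like this could cover all six metrics,
# keeping the script's column convention (first five columns = experiment 1).
def summarize_metric(market, metric, maximize=False):
    dat = pd.read_csv("./num_res/{}.{}.csv".format(market, metric))
    exp1, exp2 = dat[dat.columns[:5]], dat[dat.columns[5:]]
    stats = []
    for exp in (exp1, exp2):
        agg = exp.max() if maximize else exp.min()
        stat = pd.DataFrame([agg, exp.median(), exp.std()])
        stat.index = ["Best", "Median", "Std."]
        stats.append(stat)
    meds = stats[0].loc["Median"].sort_values(ascending=not maximize)
    return stats[0], stats[1], list(meds.index[:2])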
| 39.279762
| 105
| 0.690711
| 893
| 6,599
| 4.892497
| 0.091825
| 0.052186
| 0.035706
| 0.038453
| 0.425956
| 0.262303
| 0.262303
| 0.166171
| 0.072786
| 0.072786
| 0
| 0.036973
| 0.122897
| 6,599
| 167
| 106
| 39.51497
| 0.717865
| 0.177754
| 0
| 0.019231
| 0
| 0
| 0.130626
| 0.037541
| 0
| 0
| 0
| 0.005988
| 0
| 1
| 0
| false
| 0
| 0.028846
| 0
| 0.028846
| 0.096154
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a3465198ac8a54def9b1ff02f89cdbec3079889
| 4,239
|
py
|
Python
|
cwl_flask.py
|
Sage-Bionetworks/workflow-service
|
8b5dc0afe9ea0972014cdf48a693ee6f893cfe5e
|
[
"Apache-2.0"
] | 1
|
2019-11-14T23:46:23.000Z
|
2019-11-14T23:46:23.000Z
|
cwl_flask.py
|
Sage-Bionetworks/workflow-service
|
8b5dc0afe9ea0972014cdf48a693ee6f893cfe5e
|
[
"Apache-2.0"
] | null | null | null |
cwl_flask.py
|
Sage-Bionetworks/workflow-service
|
8b5dc0afe9ea0972014cdf48a693ee6f893cfe5e
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, Response, request, redirect
import subprocess
import tempfile
import json
import yaml
import signal
import threading
import time
import copy
app = Flask(__name__)
jobs_lock = threading.Lock()
jobs = []
class Job(threading.Thread):
def __init__(self, jobid, path, inputobj):
super(Job, self).__init__()
self.jobid = jobid
self.path = path
self.inputobj = inputobj
self.updatelock = threading.Lock()
self.begin()
def begin(self):
loghandle, self.logname = tempfile.mkstemp()
with self.updatelock:
self.outdir = tempfile.mkdtemp()
self.proc = subprocess.Popen(["cwl-runner", self.path, "-"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=loghandle,
close_fds=True,
cwd=self.outdir)
self.status = {
"id": "%sjobs/%i" % (request.url_root, self.jobid),
"log": "%sjobs/%i/log" % (request.url_root, self.jobid),
"run": self.path,
"state": "Running",
"input": json.loads(self.inputobj),
"output": None}
def run(self):
self.stdoutdata, self.stderrdata = self.proc.communicate(self.inputobj)
if self.proc.returncode == 0:
outobj = yaml.load(self.stdoutdata, Loader=yaml.FullLoader)
with self.updatelock:
self.status["state"] = "Success"
self.status["output"] = outobj
else:
with self.updatelock:
self.status["state"] = "Failed"
def getstatus(self):
with self.updatelock:
return self.status.copy()
def cancel(self):
if self.status["state"] == "Running":
self.proc.send_signal(signal.SIGQUIT)
with self.updatelock:
self.status["state"] = "Canceled"
def pause(self):
if self.status["state"] == "Running":
self.proc.send_signal(signal.SIGTSTP)
with self.updatelock:
self.status["state"] = "Paused"
def resume(self):
if self.status["state"] == "Paused":
self.proc.send_signal(signal.SIGCONT)
with self.updatelock:
self.status["state"] = "Running"
@app.route("/run", methods=['POST'])
def runworkflow():
path = request.args["wf"]
with jobs_lock:
jobid = len(jobs)
job = Job(jobid, path, request.stream.read())
job.start()
jobs.append(job)
return redirect("/jobs/%i" % jobid, code=303)
@app.route("/jobs/<int:jobid>", methods=['GET', 'POST'])
def jobcontrol(jobid):
with jobs_lock:
job = jobs[jobid]
if request.method == 'POST':
action = request.args.get("action")
if action:
if action == "cancel":
job.cancel()
elif action == "pause":
job.pause()
elif action == "resume":
job.resume()
status = job.getstatus()
return json.dumps(status, indent=4), 200, ""
def logspooler(job):
with open(job.logname, "r") as f:
while True:
r = f.read(4096)
if r:
yield r
else:
with job.updatelock:
if job.status["state"] != "Running":
break
time.sleep(1)
@app.route("/jobs/<int:jobid>/log", methods=['GET'])
def getlog(jobid):
with jobs_lock:
job = jobs[jobid]
return Response(logspooler(job))
@app.route("/jobs", methods=['GET'])
def getjobs():
with jobs_lock:
jobscopy = copy.copy(jobs)
def spool(jc):
yield "["
first = True
for j in jc:
if first:
yield json.dumps(j.getstatus(), indent=4)
first = False
else:
yield ", " + json.dumps(j.getstatus(), indent=4)
yield "]"
return Response(spool(jobscopy))
if __name__ == "__main__":
# app.debug = True
app.run()
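# Hedged client sketch (not part of the original service). It assumes the
# `requests` package and a locally running instance; "workflow.cwl" and the
# input object are placeholders.
def _example_submit(base_url="http://localhost:5000", wf_path="workflow.cwl"):
    import requests
    resp = requests.post("%s/run" % base_url,
                         params={"wf": wf_path},
                         data=json.dumps({"message": "hello"}),
                         allow_redirects=False)
    # The service answers with a 303 redirect to the new job resource.
    return resp.headers.get("Location")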
| 28.641892
| 79
| 0.517103
| 444
| 4,239
| 4.867117
| 0.297297
| 0.050902
| 0.05553
| 0.061083
| 0.240629
| 0.180009
| 0.103656
| 0.048126
| 0.048126
| 0.048126
| 0
| 0.00547
| 0.353149
| 4,239
| 147
| 80
| 28.836735
| 0.78264
| 0.003774
| 0
| 0.147541
| 0
| 0
| 0.068704
| 0.004975
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106557
| false
| 0
| 0.07377
| 0
| 0.229508
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a35e9528722fb698d7d9b2d769ceed182b29b73
| 1,265
|
py
|
Python
|
selective_merge_pdf.py
|
vs-slavchev/selective_merge_pdf
|
b24b4dbcaf1ffb8dc0924dafec56f94e452c1ebd
|
[
"MIT"
] | null | null | null |
selective_merge_pdf.py
|
vs-slavchev/selective_merge_pdf
|
b24b4dbcaf1ffb8dc0924dafec56f94e452c1ebd
|
[
"MIT"
] | null | null | null |
selective_merge_pdf.py
|
vs-slavchev/selective_merge_pdf
|
b24b4dbcaf1ffb8dc0924dafec56f94e452c1ebd
|
[
"MIT"
] | null | null | null |
from sys import argv
from PyPDF2 import PdfFileReader, PdfFileWriter
import re
range_pattern = re.compile(r'(\d+)(\.\.|-)(\d+)')
comma_pattern = re.compile(r'\d+(,\d+)*')
def pages_args_to_array(pages_str):
groups = range_pattern.search(pages_str)
if groups:
start = int(groups.group(1))
end = int(groups.group(3))
return list(range(start, end + 1))
elif comma_pattern.search(pages_str):
return [int(d) for d in pages_str.split(',')]
else:
raise Exception('pages should be like 1,2,3 or 1-3, but was {}'
.format(pages_str))
if __name__ == '__main__':
assert(len(argv) > 1), "usage example:\npython3 selective_merge_pdf.py file1.pdf 1-3 file2.pdf 3,4,10 file1.pdf 50"
assert(len(argv) % 2 == 1), "invalid arguments; supply page numbers after each pdf name"
files_names = argv[1::2]
pages_args = argv[2::2]
pdf_writer = PdfFileWriter()
for file_name, pages in zip(files_names, pages_args):
pdf_reader = PdfFileReader(file_name)
last_page_index = pdf_reader.getNumPages()
pages = pages_args_to_array(pages)
pages_to_add = list(filter(lambda i: 1 <= i <= last_page_index, pages))
for page in pages_to_add:
pdf_writer.addPage(pdf_reader.getPage(page - 1))
with open("merged.pdf", 'wb') as out:
pdf_writer.write(out)
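# Hedged sketch of the two page-selection syntaxes the parser above accepts;
# purely illustrative and not part of the original script.
def _example_page_parsing():
    assert pages_args_to_array("1-3") == [1, 2, 3]
    assert pages_args_to_array("3,4,10") == [3, 4, 10]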
| 31.625
| 115
| 0.709091
| 208
| 1,265
| 4.096154
| 0.447115
| 0.046948
| 0.037559
| 0.037559
| 0.049296
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.146245
| 1,265
| 39
| 116
| 32.435897
| 0.761111
| 0
| 0
| 0
| 0
| 0.032258
| 0.190514
| 0.017391
| 0
| 0
| 0
| 0
| 0.064516
| 1
| 0.032258
| false
| 0
| 0.096774
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a366dc7ea5c7f093418a07f29237983fc6bf2d7
| 4,031
|
py
|
Python
|
vp/scoring.py
|
romack77/vp-toolbox
|
2677b78b80d0b4794735f3ee9bd70403c6b884e6
|
[
"MIT"
] | 10
|
2019-08-03T06:29:47.000Z
|
2022-02-05T03:08:15.000Z
|
vp/scoring.py
|
romack77/vp-toolbox
|
2677b78b80d0b4794735f3ee9bd70403c6b884e6
|
[
"MIT"
] | null | null | null |
vp/scoring.py
|
romack77/vp-toolbox
|
2677b78b80d0b4794735f3ee9bd70403c6b884e6
|
[
"MIT"
] | 3
|
2019-01-22T12:19:05.000Z
|
2021-02-25T16:58:59.000Z
|
import math
from vp import geom_tools
def horizon_error(ground_truth_horizon, detected_horizon, image_dims):
"""Calculates error in a detected horizon.
This measures the max distance between the detected horizon line and
the ground truth horizon line, within the image's x-axis, and
normalized by image height.
Args:
ground_truth_horizon: Tuple with (slope, intercept) for the GT horizon line.
detected_horizon: Tuple with (slope, intercept) for the detected horizon line.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
Float, or None if a horizon is missing altogether.
"""
if ground_truth_horizon is None or detected_horizon is None:
return None
def gt(x):
return ground_truth_horizon[0] * x + ground_truth_horizon[1]
def dt(x):
return detected_horizon[0] * x + detected_horizon[1]
width, height = image_dims
return max(abs(gt(0) - dt(0)), abs(gt(width) - dt(width))) / height
def vp_direction_error(ground_truth_vps, detected_vps, image_dims):
"""Measures error in direction from center of detected vanishing points.
Each detected VP is matched with its closest unclaimed ground truth VP.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
List with float degrees of error for each ground truth VP.
Error is None for missing VPs.
"""
principal_point = (image_dims[0] // 2, image_dims[1] // 2)
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
gt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], gt_vp[0], gt_vp[1]))
dt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], dt_vp[0], dt_vp[1]))
angle_diff = 180 - abs(abs(gt_angle - dt_angle) - 180)
point_pair_dists.append((angle_diff, gt_vp, dt_vp))
point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
gt_vp_to_error = {}
seen_dt_vps = set()
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in gt_vp_to_error or dt_vp in seen_dt_vps:
continue
gt_vp_to_error[gt_vp] = distance
seen_dt_vps.add(dt_vp)
return [gt_vp_to_error.get(gt, None) for gt in ground_truth_vps]
def location_accuracy_error(ground_truth_vps, detected_vps):
"""Measures average error in the location of detected vanishing points.
"Missed" or "extra" VPs do not count against the score.
Based on log distance of detected vp from ground truth vp.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
Returns:
Float, error.
"""
if len(ground_truth_vps) == 0 or len(detected_vps) == 0:
return 0
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
distance = geom_tools.point_to_point_dist(gt_vp, dt_vp)
point_pair_dists.append((distance, gt_vp, dt_vp))
# Sort by distance so the greedy matching below pairs the closest points first.
point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
seen_gt_vps = set()
seen_dt_vps = set()
total_error = 0
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in seen_gt_vps or dt_vp in seen_dt_vps:
continue
seen_gt_vps.add(gt_vp)
seen_dt_vps.add(dt_vp)
if distance > 0:
total_error += math.log(distance)
return total_error / min(len(detected_vps), len(ground_truth_vps))
def num_model_detection_error(ground_truth_vps, detected_vps):
"""Measures error in the number of detected vanishing points.
Returns:
Integer, positive when there are too many VPs, negative
when there are too few.
"""
return len(detected_vps) - len(ground_truth_vps)
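# Hedged usage sketch with made-up lines and points; the image size and
# coordinates are illustrative only.
def _example_scoring():
    image_dims = (640, 480)
    gt_horizon, dt_horizon = (0.0, 240.0), (0.0, 252.0)
    print(horizon_error(gt_horizon, dt_horizon, image_dims))  # 12/480 = 0.025
    gt_vps, dt_vps = [(100, 100), (500, 400)], [(110, 105)]
    print(num_model_detection_error(gt_vps, dt_vps))          # -1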
| 34.161017
| 86
| 0.675019
| 618
| 4,031
| 4.142395
| 0.190939
| 0.094531
| 0.060156
| 0.015625
| 0.420313
| 0.414063
| 0.389844
| 0.290625
| 0.271094
| 0.246094
| 0
| 0.010224
| 0.247829
| 4,031
| 117
| 87
| 34.452991
| 0.834103
| 0.359712
| 0
| 0.264151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113208
| false
| 0
| 0.037736
| 0.037736
| 0.301887
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a36be6ff9e65c7b1ffad1c7ff8f47b4ee0f6df3
| 4,175
|
py
|
Python
|
compositional-rl-benchmark/composition/spinningup_training/train_mtl_ppo.py
|
collassubmission91/CompoSuite-Code
|
ac544efb68a11ed8a483b0932975c4949f0cec90
|
[
"MIT"
] | null | null | null |
compositional-rl-benchmark/composition/spinningup_training/train_mtl_ppo.py
|
collassubmission91/CompoSuite-Code
|
ac544efb68a11ed8a483b0932975c4949f0cec90
|
[
"MIT"
] | null | null | null |
compositional-rl-benchmark/composition/spinningup_training/train_mtl_ppo.py
|
collassubmission91/CompoSuite-Code
|
ac544efb68a11ed8a483b0932975c4949f0cec90
|
[
"MIT"
] | null | null | null |
import numpy as np
import argparse
import composition
import os
import json
import torch
from spinup.algos.pytorch.ppo.core import MLPActorCritic
from spinup.algos.pytorch.ppo.ppo import ppo
from spinup.utils.run_utils import setup_logger_kwargs
from spinup.utils.mpi_tools import proc_id, num_procs
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', default='spinningup_training/logs')
parser.add_argument('--load-dir', default=None)
parser.add_argument('--gridsearch-id', type=int, default=-1)
parser.add_argument('--task-id', type=int, default=-1)
parser.add_argument('--hid', type=int, default=256)
parser.add_argument('--l', type=int, default=2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=4)
parser.add_argument('--cpu', type=int, default=4)
parser.add_argument('--steps', type=int, default=16000)
parser.add_argument('--epochs', type=int, default=625)
parser.add_argument('--exp-name', type=str, default='ppo')
parser.add_argument('--clip', type=float, default=0.2)
parser.add_argument('--pi-lr', type=float, default=1e-4)
parser.add_argument('--vf-lr', type=float, default=1e-4)
parser.add_argument('--pi-iters', type=int, default=128)
parser.add_argument('--vf-iters', type=int, default=128)
parser.add_argument('--target-kl', type=float, default=0.02)
parser.add_argument('--ent-coef', type=float, default=0.02)
parser.add_argument('--log-std-init', type=float, default=0.)
parser.add_argument('--controller', type=str, default="joint")
parser.add_argument('--robot', type=str, default="IIWA")
parser.add_argument('--object', type=str, default="Hollowbox")
parser.add_argument('--obstacle', type=str, default=None)
parser.add_argument('--task', type=str, default="PickPlace")
parser.add_argument('--horizon', type=int, default=500)
args = parser.parse_args()
np.random.seed(args.seed)
task_list = np.random.choice(256, num_procs(), replace=False)
args.task_id = int(task_list[proc_id()])
_robots = ["IIWA", "Jaco", "Kinova3", "Panda"]
_objects = ["Box", "Dumbbell", "Plate", "Hollowbox"]
_objectives = ["PickPlace", "Push", "Shelf", "Trashcan"]
_obstacles = ["None", "GoalWall", "ObjectDoor", "ObjectWall"]
idx = np.unravel_index(args.task_id, (len(_robots), len(_objects), len(_objectives), len(_obstacles)))
args.robot = _robots[idx[0]]
args.object = _objects[idx[1]]
args.task = _objectives[idx[2]]
args.obstacle = _obstacles[idx[3]]
# args.exp_name = "t:" + str(args.task_id) + "_name:" + args.exp_name + "_robot:" + str(args.robot) + "_task:" + str(args.task) + "_object:" + str(args.object) + "_obstacle:" + str(args.obstacle)
args.exp_name = 'MTL_{}'.format(len(task_list))
return args
def main():
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.set_num_threads(1)
args = parse_args()
os.makedirs(os.path.join(args.data_dir, args.exp_name), exist_ok=True)
with open(os.path.join(args.data_dir, args.exp_name, 'args_{}.json'.format(proc_id())), 'w') as f:
json.dump(args.__dict__, f, indent=2)
logger_kwargs = setup_logger_kwargs(
args.exp_name, data_dir=args.data_dir)
checkpoint = None
if args.load_dir is not None:
checkpoint = torch.load(os.path.join(args.load_dir, 'pyt_save', 'state_dicts.pt'))
ppo(lambda: composition.make(
args.robot, args.object, args.obstacle, args.task, args.controller, args.horizon, use_task_id_obs=True), actor_critic=MLPActorCritic,
ac_kwargs=dict(hidden_sizes=[args.hid]*args.l, log_std_init=args.log_std_init), seed=args.seed, gamma=args.gamma, steps_per_epoch=args.steps, epochs=args.epochs, clip_ratio=args.clip,
pi_lr=args.pi_lr, vf_lr=args.vf_lr, train_pi_iters=args.pi_iters, train_v_iters=args.vf_iters, target_kl=args.target_kl,
logger_kwargs=logger_kwargs, max_ep_len=args.horizon, ent_coef=args.ent_coef, log_per_proc=True, checkpoint=checkpoint)
if __name__ == '__main__':
main()
| 43.489583
| 199
| 0.697725
| 606
| 4,175
| 4.592409
| 0.273927
| 0.084082
| 0.158821
| 0.030543
| 0.189723
| 0.151635
| 0.151635
| 0.128638
| 0.050305
| 0
| 0
| 0.014135
| 0.135808
| 4,175
| 95
| 200
| 43.947368
| 0.757206
| 0.046228
| 0
| 0
| 0
| 0
| 0.10804
| 0.00603
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027397
| false
| 0
| 0.136986
| 0
| 0.178082
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a3711b515419fb6ad721023cf62fe24b0ba8280
| 15,121
|
py
|
Python
|
igvm/cli.py
|
innogames/igvm
|
6c4bd98d61ebaf6280698e74d560ea5b3d70cd9e
|
[
"MIT"
] | 14
|
2018-02-15T14:09:54.000Z
|
2021-07-19T01:55:58.000Z
|
igvm/cli.py
|
innogames/igvm
|
6c4bd98d61ebaf6280698e74d560ea5b3d70cd9e
|
[
"MIT"
] | 129
|
2018-02-19T09:47:18.000Z
|
2022-03-02T14:08:10.000Z
|
igvm/cli.py
|
innogames/igvm
|
6c4bd98d61ebaf6280698e74d560ea5b3d70cd9e
|
[
"MIT"
] | 10
|
2018-02-16T15:56:59.000Z
|
2021-05-14T23:31:31.000Z
|
"""igvm - The command line interface
Copyright (c) 2017 InnoGames GmbH
"""
from __future__ import print_function
from argparse import ArgumentParser, _SubParsersAction
from logging import StreamHandler, root as root_logger
import time
from fabric.network import disconnect_all
from igvm.commands import (
change_address,
disk_set,
evacuate,
host_info,
mem_set,
vcpu_set,
vm_build,
vm_delete,
vm_migrate,
vm_rename,
vm_restart,
vm_start,
vm_stop,
vm_sync, vm_define,
)
from igvm.libvirt import close_virtconns
class ColorFormatters():
BOLD = '\033[1m{}\033[0m'
WARNING = '\033[1;33m{}\033[0m'
ERROR = '\033[1;31m{}\033[0m'
CRITICAL = '\033[1;41m{}\033[0m'
class IGVMArgumentParser(ArgumentParser):
def format_help(self):
if not any(isinstance(a, _SubParsersAction) for a in self._actions):
return super(IGVMArgumentParser, self).format_help()
out = []
out.append(ColorFormatters.BOLD.format(__doc__))
out.append('Available commands:\n')
subparsers_actions = [
action for action in self._actions
if isinstance(action, _SubParsersAction)
]
# There will probably only be one subparser_action, but better safe
# than sorry.
for subparsers_action in subparsers_actions:
# Get all subparsers and print help
for choice, subparser in subparsers_action.choices.items():
out.append(ColorFormatters.BOLD.format(choice))
if subparser.get_default('func').__doc__:
out.append('\n'.join(
'\t{}'.format(l.strip()) for l in subparser
.get_default('func').__doc__.strip().splitlines()
))
out.append('\n\t{}'.format(subparser.format_usage()))
return '\n'.join(out)
class IGVMLogHandler(StreamHandler):
"""Extend StreamHandler to format messages short-cutting Formatters"""
def __init__(self, *args, **kwargs):
super(IGVMLogHandler, self).__init__(*args, **kwargs)
self.isatty = self.stream.isatty()
def format(self, record):
level = record.levelname
msg = '{}: {}: {}'.format(level, record.name, record.getMessage())
if self.isatty and level in vars(ColorFormatters):
msg = getattr(ColorFormatters, level).format(msg)
return msg
def parse_args():
top_parser = IGVMArgumentParser('igvm')
top_parser.add_argument('--silent', '-s', action='count', default=0)
top_parser.add_argument('--verbose', '-v', action='count', default=0)
subparsers = top_parser.add_subparsers(help='Actions')
subparser = subparsers.add_parser(
'build',
description=vm_build.__doc__,
)
subparser.set_defaults(func=vm_build)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--postboot',
metavar='postboot_script',
help='Run postboot_script on the guest after first boot',
)
subparser.add_argument(
'--skip-puppet',
action='store_false',
dest='run_puppet',
help='Skip running puppet in chroot before powering up',
)
subparser.add_argument(
'--debug-puppet',
action='store_true',
help='Run puppet in debug mode',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow building on a Host which has the state online_reserved',
)
subparser.add_argument(
'--rebuild',
dest='rebuild',
action='store_true',
help='Rebuild already defined VM or build it if not defined',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'migrate',
description=vm_migrate.__doc__,
)
subparser.set_defaults(func=vm_migrate)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'hypervisor_hostname',
nargs='?',
default=None,
help='Hostname of destination hypervisor',
)
subparser.add_argument(
'--run-puppet',
action='store_true',
help='Run puppet in chroot before powering up',
)
subparser.add_argument(
'--debug-puppet',
action='store_true',
help='Run puppet in debug mode',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Force offline migration',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migration to a Host which has the state online_reserved',
)
subparser.add_argument(
'--offline-transport',
default='drbd',
choices=('drbd', 'netcat', 'xfs'),
help=(
'Specify drbd (default), netcat or xfs transport to migrate '
'disk image'
),
)
subparser.add_argument(
'--no-shutdown',
action='store_true',
help=(
'Don\'t shut down the VM during offline migration; igvm will wait '
'up to 24h for the operator to shut it down.'
),
)
subparser.add_argument(
'--enforce-vm-env',
dest='enforce_vm_env',
action='store_true',
help='Build or migrate VM only to a HV with the same environment of VM'
)
subparser.add_argument(
'--disk-size',
dest='disk_size',
type=int,
help='Resize disk of migrated VM. Expects new size in GiB. '
'Works only with --offline --offline-transport=xfs',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'change-address',
description=disk_set.__doc__,
)
subparser.set_defaults(func=change_address)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'new_address',
help=(
'New IPv4 address of VM'
)
)
subparser.add_argument(
'--offline',
action='store_true',
help='Perform IP address change offline',
)
subparser.add_argument(
'--migrate',
action='store_true',
help='Migrate VM to new HV while changing IP address',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migration to a Host which has the state online_reserved',
)
subparser.add_argument(
'--offline-transport',
default='drbd',
help=(
'Specify drbd (default) or netcat transport to migrate disk image'
),
)
subparser = subparsers.add_parser(
'disk-set',
description=disk_set.__doc__,
)
subparser.set_defaults(func=disk_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'size',
help=(
'New disk size with an optional unit (default GiB). '
'Can be specified relative with "+". Only integers are allowed'
)
)
subparser = subparsers.add_parser(
'mem-set',
description=mem_set.__doc__,
)
subparser.set_defaults(func=mem_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'size',
help=(
'New memory size with optional unit (default is MiB).'
'Only integers are allowed.'
),
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, change memory, and restart VM',
)
subparser = subparsers.add_parser(
'vcpu-set',
description=vcpu_set.__doc__,
)
subparser.set_defaults(func=vcpu_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'count',
type=int,
help='New number of CPUs',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, change CPUs, and restart VM',
)
subparser = subparsers.add_parser(
'start',
description=vm_start.__doc__,
)
subparser.set_defaults(func=vm_start)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--unretire',
nargs='?',
const='maintenance',
help='Unretire a VM, set it to given state, maintenance by default',
)
subparser = subparsers.add_parser(
'stop',
description=vm_stop.__doc__,
)
subparser.set_defaults(func=vm_stop)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--force',
action='store_true',
help='Do not wait for guest to shutdown gracefully',
)
subparser.add_argument(
'--retire',
action='store_true',
help='Retire VM after stopping it',
)
subparser = subparsers.add_parser(
'restart',
description=vm_restart.__doc__,
)
subparser.set_defaults(func=vm_restart)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--force',
action='store_true',
help='Do not wait for guest to shutdown gracefully',
)
subparser.add_argument(
'--no-redefine',
action='store_true',
help='Do not redefine the domain to use latest hypervisor settings',
)
subparser = subparsers.add_parser(
'delete',
description=vm_delete.__doc__,
)
subparser.set_defaults(func=vm_delete)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--retire',
action='store_true',
help='Set VM state to "retired" on Serveradmin instead of deleting',
)
subparser = subparsers.add_parser(
'info',
description=host_info.__doc__,
)
subparser.set_defaults(func=host_info)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser = subparsers.add_parser(
'sync',
description=vm_sync.__doc__,
)
subparser.set_defaults(func=vm_sync)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser = subparsers.add_parser(
'rename',
description=vm_rename.__doc__,
)
subparser.set_defaults(func=vm_rename)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'new_hostname',
help='New hostname',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, if running',
)
subparser = subparsers.add_parser(
'evacuate',
description=evacuate.__doc__,
)
subparser.set_defaults(func=evacuate)
subparser.add_argument(
'hv_hostname',
help='Hostname of the hypervisor',
)
subparser.add_argument(
'dst_hv_hostname',
nargs='?',
default=None,
help='Hostname of destination hypervisor',
)
subparser.add_argument(
'--dry-run',
action='store_true',
help='Do not migrate but just print what would be done'
)
subparser.add_argument(
'--offline',
nargs='*',
help='Migrate VMs matching the given serveradmin function offline',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migrating to a host which has the state online_reserved',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'define',
description=vm_define.__doc__,
)
subparser.set_defaults(func=vm_define)
subparser.add_argument('vm_hostname', help='Hostname of the guest system')
return vars(top_parser.parse_args())
def main():
args = parse_args()
configure_root_logger(args.pop('silent'), args.pop('verbose'))
try:
args.pop('func')(**args)
finally:
# Fabric requires the disconnect function to be called after every
# use. We are also taking our chance to disconnect from
# the hypervisors.
disconnect_all()
close_virtconns()
# The underlying library of Fabric, Paramiko, raises an error, on
# destruction right after the disconnect function is called. We are
# sleeping for a little while to avoid this.
time.sleep(0.1)
def configure_root_logger(silent, verbose):
root_logger.addHandler(IGVMLogHandler())
# We are summing up the silent and verbose arguments in here. It
# is not really meaningful to use them both, but giving an error is not
# better. See Python logging library documentation [1] for the levels.
# Paramiko is overly verbose. We configure it for one level higher.
#
# [1] https://docs.python.org/library/logging.html#logging-levels
level = 20 + (silent - verbose) * 10
root_logger.setLevel(level)
root_logger.getChild('paramiko').setLevel(level + 10)
| 29.824458
| 79
| 0.612195
| 1,709
| 15,121
| 5.231714
| 0.194266
| 0.068896
| 0.120792
| 0.053126
| 0.506096
| 0.474779
| 0.425456
| 0.416508
| 0.389666
| 0.389666
| 0
| 0.005266
| 0.284174
| 15,121
| 506
| 80
| 29.883399
| 0.820769
| 0.058925
| 0
| 0.433708
| 0
| 0
| 0.315567
| 0.001619
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013483
| false
| 0
| 0.01573
| 0
| 0.053933
| 0.004494
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a390498151447698302dd1d056f6ca3842fd3c6
| 987
|
py
|
Python
|
test/test_data_processor/test_condition_generation_dataset.py
|
puraminy/OpenPrompt
|
49f0ed9719bb6285e94c746de4511991c848492c
|
[
"Apache-2.0"
] | 979
|
2021-09-30T15:32:58.000Z
|
2022-03-31T11:23:03.000Z
|
test/test_data_processor/test_condition_generation_dataset.py
|
Spritebai/OpenPrompt
|
bd9ea544ab144d94af32d245101ba35c9d5a5a65
|
[
"Apache-2.0"
] | 104
|
2021-10-01T07:56:33.000Z
|
2022-03-31T14:39:09.000Z
|
test/test_data_processor/test_condition_generation_dataset.py
|
Spritebai/OpenPrompt
|
bd9ea544ab144d94af32d245101ba35c9d5a5a65
|
[
"Apache-2.0"
] | 121
|
2021-09-30T16:09:53.000Z
|
2022-03-31T09:39:34.000Z
|
import os, sys
from os.path import dirname as d
from os.path import abspath, join
root_dir = d(d(d(abspath(__file__))))
sys.path.append(root_dir)
from openprompt.data_utils.conditional_generation_dataset import PROCESSORS
base_path = os.path.join(root_dir, "datasets/CondGen")
def test_WebNLGProcessor():
dataset_name = "webnlg_2017"
dataset_path = os.path.join(base_path, dataset_name)
processor = PROCESSORS[dataset_name.lower()]()
train_dataset = processor.get_train_examples(dataset_path)
valid_dataset = processor.get_train_examples(dataset_path)
test_dataset = processor.get_test_examples(dataset_path)
assert len(train_dataset) == 18025
assert len(valid_dataset) == 18025
assert len(test_dataset) == 4928
assert test_dataset[0].text_a == " | Abilene_Regional_Airport : cityServed : Abilene,_Texas"
assert test_dataset[0].text_b == ""
assert test_dataset[0].tgt_text == "Abilene, Texas is served by the Abilene regional airport."
| 41.125
| 98
| 0.761905
| 139
| 987
| 5.107914
| 0.388489
| 0.077465
| 0.080282
| 0.076056
| 0.183099
| 0.121127
| 0.121127
| 0
| 0
| 0
| 0
| 0.024677
| 0.137791
| 987
| 23
| 99
| 42.913043
| 0.809636
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0.024316
| 0
| 0
| 0
| 0
| 0.3
| 1
| 0.05
| false
| 0
| 0.2
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a393fec60ca724f475a9fdf13a20c1df07768c4
| 5,354
|
py
|
Python
|
BaseTools/Source/Python/Common/BuildToolError.py
|
JayLeeCompal/EDKII_Git
|
de4800d50e1f357002bf77235d3bebabd0c00007
|
[
"MIT"
] | 1
|
2022-01-20T04:51:29.000Z
|
2022-01-20T04:51:29.000Z
|
BaseTools/Source/Python/Common/BuildToolError.py
|
JayLeeCompal/EDKII_Git
|
de4800d50e1f357002bf77235d3bebabd0c00007
|
[
"MIT"
] | 1
|
2022-01-21T06:19:02.000Z
|
2022-01-21T06:19:02.000Z
|
BaseTools/Source/Python/Common/BuildToolError.py
|
JayLeeCompal/EDKII_Git
|
de4800d50e1f357002bf77235d3bebabd0c00007
|
[
"MIT"
] | null | null | null |
## @file
# Standardized Error Handling infrastructure.
#
# Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
FILE_OPEN_FAILURE = 1
FILE_WRITE_FAILURE = 2
FILE_PARSE_FAILURE = 3
FILE_READ_FAILURE = 4
FILE_CREATE_FAILURE = 5
FILE_CHECKSUM_FAILURE = 6
FILE_COMPRESS_FAILURE = 7
FILE_DECOMPRESS_FAILURE = 8
FILE_MOVE_FAILURE = 9
FILE_DELETE_FAILURE = 10
FILE_COPY_FAILURE = 11
FILE_POSITIONING_FAILURE = 12
FILE_ALREADY_EXIST = 13
FILE_NOT_FOUND = 14
FILE_TYPE_MISMATCH = 15
FILE_CASE_MISMATCH = 16
FILE_DUPLICATED = 17
FILE_UNKNOWN_ERROR = 0x0FFF
OPTION_UNKNOWN = 0x1000
OPTION_MISSING = 0x1001
OPTION_CONFLICT = 0x1002
OPTION_VALUE_INVALID = 0x1003
OPTION_DEPRECATED = 0x1004
OPTION_NOT_SUPPORTED = 0x1005
OPTION_UNKNOWN_ERROR = 0x1FFF
PARAMETER_INVALID = 0x2000
PARAMETER_MISSING = 0x2001
PARAMETER_UNKNOWN_ERROR = 0x2FFF
FORMAT_INVALID = 0x3000
FORMAT_NOT_SUPPORTED = 0x3001
FORMAT_UNKNOWN = 0x3002
FORMAT_UNKNOWN_ERROR = 0x3FFF
RESOURCE_NOT_AVAILABLE = 0x4000
RESOURCE_ALLOCATE_FAILURE = 0x4001
RESOURCE_FULL = 0x4002
RESOURCE_OVERFLOW = 0x4003
RESOURCE_UNDERRUN = 0x4004
RESOURCE_UNKNOWN_ERROR = 0x4FFF
ATTRIBUTE_NOT_AVAILABLE = 0x5000
ATTRIBUTE_GET_FAILURE = 0x5001
ATTRIBUTE_SET_FAILURE = 0x5002
ATTRIBUTE_UPDATE_FAILURE = 0x5003
ATTRIBUTE_ACCESS_DENIED = 0x5004
ATTRIBUTE_UNKNOWN_ERROR = 0x5FFF
IO_NOT_READY = 0x6000
IO_BUSY = 0x6001
IO_TIMEOUT = 0x6002
IO_UNKNOWN_ERROR = 0x6FFF
COMMAND_FAILURE = 0x7000
PERMISSION_FAILURE = 0x8000
CODE_ERROR = 0xC0DE
AUTOGEN_ERROR = 0xF000
PARSER_ERROR = 0xF001
BUILD_ERROR = 0xF002
GENFDS_ERROR = 0xF003
ECC_ERROR = 0xF004
EOT_ERROR = 0xF005
DDC_ERROR = 0xF009
WARNING_AS_ERROR = 0xF006
MIGRATION_ERROR = 0xF010
PCD_VALIDATION_INFO_ERROR = 0xF011
PCD_VARIABLE_ATTRIBUTES_ERROR = 0xF012
PCD_VARIABLE_ATTRIBUTES_CONFLICT_ERROR = 0xF013
ABORT_ERROR = 0xFFFE
UNKNOWN_ERROR = 0xFFFF
## Error message of each error code
gErrorMessage = {
FILE_NOT_FOUND : "File/directory not found in workspace",
FILE_OPEN_FAILURE : "File open failure",
FILE_WRITE_FAILURE : "File write failure",
FILE_PARSE_FAILURE : "File parse failure",
FILE_READ_FAILURE : "File read failure",
FILE_CREATE_FAILURE : "File create failure",
FILE_CHECKSUM_FAILURE : "Invalid checksum of file",
FILE_COMPRESS_FAILURE : "File compress failure",
FILE_DECOMPRESS_FAILURE : "File decompress failure",
FILE_MOVE_FAILURE : "File move failure",
FILE_DELETE_FAILURE : "File delete failure",
FILE_COPY_FAILURE : "File copy failure",
FILE_POSITIONING_FAILURE: "Failed to seeking position",
FILE_ALREADY_EXIST : "File or directory already exists",
FILE_TYPE_MISMATCH : "Incorrect file type",
FILE_CASE_MISMATCH : "File name case mismatch",
FILE_DUPLICATED : "Duplicated file found",
FILE_UNKNOWN_ERROR : "Unknown error encountered on file",
OPTION_UNKNOWN : "Unknown option",
OPTION_MISSING : "Missing option",
OPTION_CONFLICT : "Conflict options",
OPTION_VALUE_INVALID : "Invalid value of option",
OPTION_DEPRECATED : "Deprecated option",
OPTION_NOT_SUPPORTED : "Unsupported option",
OPTION_UNKNOWN_ERROR : "Unknown error when processing options",
PARAMETER_INVALID : "Invalid parameter",
PARAMETER_MISSING : "Missing parameter",
PARAMETER_UNKNOWN_ERROR : "Unknown error in parameters",
FORMAT_INVALID : "Invalid syntax/format",
FORMAT_NOT_SUPPORTED : "Not supported syntax/format",
FORMAT_UNKNOWN : "Unknown format",
FORMAT_UNKNOWN_ERROR : "Unknown error in syntax/format ",
RESOURCE_NOT_AVAILABLE : "Not available",
RESOURCE_ALLOCATE_FAILURE : "Allocate failure",
RESOURCE_FULL : "Full",
RESOURCE_OVERFLOW : "Overflow",
RESOURCE_UNDERRUN : "Underrun",
RESOURCE_UNKNOWN_ERROR : "Unknown error",
ATTRIBUTE_NOT_AVAILABLE : "Not available",
ATTRIBUTE_GET_FAILURE : "Failed to retrieve",
ATTRIBUTE_SET_FAILURE : "Failed to set",
ATTRIBUTE_UPDATE_FAILURE: "Failed to update",
ATTRIBUTE_ACCESS_DENIED : "Access denied",
ATTRIBUTE_UNKNOWN_ERROR : "Unknown error when accessing",
COMMAND_FAILURE : "Failed to execute command",
IO_NOT_READY : "Not ready",
IO_BUSY : "Busy",
IO_TIMEOUT : "Timeout",
IO_UNKNOWN_ERROR : "Unknown error in IO operation",
UNKNOWN_ERROR : "Unknown error",
}
## Exception indicating a fatal error
class FatalError(Exception):
pass
if __name__ == "__main__":
pass
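## Hedged usage sketch (not part of EDK II): callers typically look up the
## message for a code and abort via FatalError; this tiny wrapper is
## illustrative only.
def _fail(error_code):
    message = gErrorMessage.get(error_code, "Unknown error")
    raise FatalError("%s (error code 0x%04X)" % (message, error_code))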
| 33.886076
| 85
| 0.699664
| 617
| 5,354
| 5.748784
| 0.356564
| 0.081195
| 0.042853
| 0.05413
| 0.11841
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056307
| 0.240381
| 5,354
| 157
| 86
| 34.101911
| 0.815835
| 0.110758
| 0
| 0.016529
| 0
| 0
| 0.207588
| 0
| 0
| 0
| 0.0628
| 0
| 0
| 1
| 0
| false
| 0.016529
| 0
| 0
| 0.008264
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a3943aef4b92eda2997e8228a72ccdd4b255c3d
| 1,360
|
py
|
Python
|
datasets/SUN397EncodbleDataset.py
|
allenai/ViRB
|
fbe1c42571ce0994b1e41bc4bdf88cf9658ae48b
|
[
"Apache-2.0"
] | 26
|
2021-05-19T13:49:53.000Z
|
2022-02-10T16:33:47.000Z
|
datasets/SUN397EncodbleDataset.py
|
allenai/ViRB
|
fbe1c42571ce0994b1e41bc4bdf88cf9658ae48b
|
[
"Apache-2.0"
] | null | null | null |
datasets/SUN397EncodbleDataset.py
|
allenai/ViRB
|
fbe1c42571ce0994b1e41bc4bdf88cf9658ae48b
|
[
"Apache-2.0"
] | 1
|
2021-06-07T02:55:30.000Z
|
2021-06-07T02:55:30.000Z
|
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import glob
from PIL import Image
import random
class SUN397EncodableDataset(Dataset):
"""SUN397 encodable dataset class"""
def __init__(self, train=True):
super().__init__()
path = 'data/SUN397/train/*/*.jpg' if train else 'data/SUN397/test/*/*.jpg'
self.data = list(glob.glob(path))
random.shuffle(self.data)
cats = list(set([path.split("/")[3] for path in self.data]))
cats.sort()
self.labels = torch.LongTensor([cats.index(path.split("/")[3]) for path in self.data])
self.preprocessor = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Cache for pre-encoded features; initialised empty so __getitem__ falls back to loading raw images.
self.encoded_data = []
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
if len(self.encoded_data) == 0:
return self.preprocessor(Image.open(self.data[idx]).convert('RGB')), self.labels[idx]
return self.encoded_data[idx], self.labels[idx]
def __len__(self):
return len(self.labels)
def num_classes(self):
return int(max(self.labels) + 1)
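# Illustrative usage (not part of the original file); assumes the SUN397 images are
# extracted to data/SUN397/{train,test}/<category>/*.jpg as the glob patterns above expect:
#
#   from torch.utils.data import DataLoader
#   dataset = SUN397EncodableDataset(train=True)
#   loader = DataLoader(dataset, batch_size=32, shuffle=True)
#   images, labels = next(iter(loader))   # images: (32, 3, 224, 224), labels: (32,)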
| 34
| 97
| 0.625735
| 179
| 1,360
| 4.636872
| 0.413408
| 0.048193
| 0.028916
| 0.031325
| 0.06506
| 0.06506
| 0.06506
| 0.06506
| 0
| 0
| 0
| 0.044719
| 0.227206
| 1,360
| 39
| 98
| 34.871795
| 0.745005
| 0.022059
| 0
| 0
| 0
| 0
| 0.047583
| 0.037009
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.193548
| 0.064516
| 0.483871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a3be6996ac9517d3022400855065d32ff7ed3c0
| 1,359
|
py
|
Python
|
scripts/bam-stats.py
|
varlociraptor/prosic-evaluation
|
f4f1950ba5c10bda0f41df2a8f519d98f779d736
|
[
"MIT"
] | 2
|
2020-04-29T00:56:09.000Z
|
2021-03-07T19:59:06.000Z
|
scripts/bam-stats.py
|
varlociraptor/prosic-evaluation
|
f4f1950ba5c10bda0f41df2a8f519d98f779d736
|
[
"MIT"
] | null | null | null |
scripts/bam-stats.py
|
varlociraptor/prosic-evaluation
|
f4f1950ba5c10bda0f41df2a8f519d98f779d736
|
[
"MIT"
] | 1
|
2022-03-15T12:23:03.000Z
|
2022-03-15T12:23:03.000Z
|
#!/usr/bin/env python
import sys
import numpy as np
import pandas as pd
import pysam
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import seaborn as sns
from functools import partial
tumor = pysam.AlignmentFile(snakemake.input[0], "rb")
normal = pysam.AlignmentFile(snakemake.input[1], "rb")
softclips = []
for i, rec in enumerate(normal):
if rec.is_supplementary or rec.is_unmapped:
continue
is_first_read = rec.pos < rec.mpos
get_clip = lambda c: c[1] if c[0] == 4 else None
clip_left = get_clip(rec.cigartuples[0])
if clip_left is not None:
softclips.append([clip_left, True, is_first_read])
clip_right = get_clip(rec.cigartuples[-1])
if clip_right is not None:
softclips.append([clip_right, False, is_first_read])
if i == 10000000:
break
softclips = pd.DataFrame(softclips, columns=["len", "left", "first_in_pair"])
def plot(*args, **kwargs):
softclips = args[0]
# matplotlib removed the "normed" argument; "density" is the equivalent.
plt.hist(softclips, density=True)
q99 = np.percentile(softclips, 99)
plt.plot([q99, q99], [0, 1.0], "--k")
m = max(softclips)
plt.plot([m, m], [0, 1.0], ":k")
plt.text(m, 1, "max={}".format(m), horizontalalignment="right", verticalalignment="top")
g = sns.FacetGrid(softclips, col="left", row="first_in_pair")
g = g.map(plot, "len")
plt.savefig(snakemake.output[0])
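# Note: this script is written to run inside a Snakemake rule, which injects the global
# `snakemake` object used above (input[0]: tumor BAM, input[1]: normal BAM, output[0]:
# path for the soft-clip length histogram). Only the normal sample is iterated here;
# the tumor AlignmentFile is opened but not read in this snippet.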
| 28.3125
| 92
| 0.675497
| 206
| 1,359
| 4.354369
| 0.436893
| 0.023411
| 0.036789
| 0.071349
| 0.06243
| 0.06243
| 0
| 0
| 0
| 0
| 0
| 0.028597
| 0.1766
| 1,359
| 47
| 93
| 28.914894
| 0.773012
| 0.014717
| 0
| 0
| 0
| 0
| 0.049327
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.216216
| 0
| 0.243243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a3c1af48960fabf760e667011b0450023e75e10
| 4,849
|
py
|
Python
|
AdversarialSampleGeneratorV11/AdversarialSampleGeneratorV11/ResNetConstructor.py
|
MetaMain/BewareAdvML
|
52d489b565b0df36cb588b5709c29c2e8e4d3f49
|
[
"BSD-3-Clause"
] | 1
|
2022-03-25T07:53:13.000Z
|
2022-03-25T07:53:13.000Z
|
AdversarialSampleGeneratorV11/AdversarialSampleGeneratorV11/ResNetConstructor.py
|
MetaMain/BewareAdvML
|
52d489b565b0df36cb588b5709c29c2e8e4d3f49
|
[
"BSD-3-Clause"
] | null | null | null |
AdversarialSampleGeneratorV11/AdversarialSampleGeneratorV11/ResNetConstructor.py
|
MetaMain/BewareAdvML
|
52d489b565b0df36cb588b5709c29c2e8e4d3f49
|
[
"BSD-3-Clause"
] | null | null | null |
import tensorflow
from tensorflow import keras
Model = keras.models.Model
Dense = keras.layers.Dense
Activation = keras.layers.Activation
Flatten = keras.layers.Flatten
BatchNormalization= keras.layers.BatchNormalization
Conv2D = tensorflow.keras.layers.Conv2D
AveragePooling2D = keras.layers.AveragePooling2D
Input=keras.layers.Input
l2=keras.regularizers.l2
from tensorflow.keras import backend
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(
num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
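# Illustrative use of resnet_layer (not part of the original file); the 32x32x3 input
# shape is an assumption for CIFAR-sized images:
#
#   inputs = Input(shape=(32, 32, 3))
#   x = resnet_layer(inputs=inputs, num_filters=16)   # conv -> BN -> ReLU with the defaults above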
def resnet_v2(input, complexityParameter, num_classes=10, dataset='cifar10'):
depth = complexityParameter * 9 + 2
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
# Start model definition.
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = input
x = resnet_layer(inputs=inputs,
num_filters=num_filters_in,
conv_first=True)
# Instantiate the stack of residual units
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0: # first layer and first stage
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0: # first layer but not first stage
strides = 2 # downsample
# bottleneck residual unit
y = resnet_layer(inputs=x,
num_filters=num_filters_in,
kernel_size=1,
strides=strides,
activation=activation,
batch_normalization=batch_normalization,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_in,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_out,
kernel_size=1,
conv_first=False)
if res_block == 0:
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters_out,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = tensorflow.keras.layers.add([x, y])
num_filters_in = num_filters_out
# Add classifier on top.
# v2 has BN-ReLU before Pooling
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
final_features = Flatten()(x)
logits = Dense(num_classes, kernel_initializer='he_normal')(final_features)
outputs = Activation('softmax')(logits)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model, inputs, outputs, logits, final_features
| 37.589147
| 80
| 0.545061
| 495
| 4,849
| 5.175758
| 0.274747
| 0.078064
| 0.032787
| 0.039032
| 0.272443
| 0.218579
| 0.159641
| 0.14676
| 0.106167
| 0.106167
| 0
| 0.018103
| 0.384822
| 4,849
| 129
| 81
| 37.589147
| 0.840764
| 0.176325
| 0
| 0.336842
| 0
| 0
| 0.023672
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021053
| false
| 0
| 0.031579
| 0
| 0.073684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a3cda3b610042fefd30969a702f9d925c74876f
| 4,421
|
py
|
Python
|
ttl2json.py
|
the-norman-sicily-project/genealogical-trees
|
32fa4f25861ae34543b0a6b95e54842c0018331b
|
[
"MIT"
] | 1
|
2021-05-18T20:39:30.000Z
|
2021-05-18T20:39:30.000Z
|
ttl2json.py
|
the-norman-sicily-project/genealogical-trees
|
32fa4f25861ae34543b0a6b95e54842c0018331b
|
[
"MIT"
] | null | null | null |
ttl2json.py
|
the-norman-sicily-project/genealogical-trees
|
32fa4f25861ae34543b0a6b95e54842c0018331b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import json
import rdflib
import rdflib.plugins.sparql as sparql
RELS_TO_DRAW = ['isWifeOf', 'isMotherOf', 'isFatherOf', 'isHusbandOf', 'isSpouseOf']
RELS_TO_INFER = ['hasGrandParent', 'isGrandParentOf', 'hasGreatGrandParent',
'isGreatGrandParentOf', 'isUncleOf', 'hasUncle',
'isGreatUncleOf', 'hasGreatUncle', 'isAuntOf', 'hasAunt',
'isGreatAuntOf', 'hasGreatAunt',
'isBrotherOf', 'isSisterOf', 'isSiblingOf',
'isFirstCousinOf', 'isSecondCousinOf', 'isThirdCousinOf']
RELS_OF_INTEREST = RELS_TO_DRAW + RELS_TO_INFER
try:
workpath = sys.argv[1]
except IndexError:
sys.exit("No path defined!")
try:
recursion_limit = int(sys.argv[2])
except IndexError:
recursion_limit = 0
if recursion_limit > 0:
sys.setrecursionlimit(recursion_limit)
g = rdflib.Graph()
g.parse(workpath, format="turtle")
fhkb_str = "http://www.example.com/genealogy.owl#"
schema_str = "https://schema.org/"
FHKB = rdflib.Namespace(fhkb_str)
SCHEMA_ORG = rdflib.Namespace(schema_str)
def dump(uriref):
if uriref.__contains__('#'):
return uriref.split('#')[-1]
return uriref.split('/')[-1]
graph = {}
graph['nodes'] = []
graph['edges'] = []
nodes = {}
q = sparql.prepareQuery(
"""PREFIX fhkb:<http://www.example.com/genealogy.owl#>
SELECT ?person ?pred ?obj
WHERE {
?person a fhkb:Person ;
?pred ?obj .
}
ORDER BY ?person""")
for rel in RELS_OF_INTEREST:
pred = rdflib.URIRef("{}{}".format(fhkb_str, rel))
relation_query_results = g.query(q, initBindings={'pred': pred})
for (subj, pred, obj) in relation_query_results:
graph['edges'].append(
{
'data': {
'group': 'edges',
'id': f'{dump(subj)}-{dump(pred)}-{dump(obj)}',
'source': dump(subj),
'target': dump(obj),
'type': dump(pred)
}
})
q_details = sparql.prepareQuery(
"""PREFIX fhkb:<http://www.example.com/genealogy.owl#>
SELECT ?person ?pred ?obj
WHERE {
?person a fhkb:Person ;
?pred ?obj .
FILTER NOT EXISTS {
?person ?testPred ?obj .
VALUES ?testPred {
fhkb:isWifeOf
fhkb:isMotherOf
fhkb:isFatherOf
fhkb:isHusbandOf
fhkb:isSpouseOf
fhkb:hasGrandParent
fhkb:isGrandParentOf
fhkb:hasGreatGrandParent
fhkb:isGreatGrandParentOf
fhkb:isUncleOf
fhkb:hasUncle
fhkb:isGreatUncleOf
fhkb:hasGreatUncle
fhkb:isAuntOf
fhkb:hasAunt
fhkb:isGreatAuntOf
fhkb:hasGreatAunt
fhkb:isBrotherOf
fhkb:isSisterOf
fhkb:isSiblingOf
fhkb:isFirstCousinOf
fhkb:isSecondCousinOf
fhkb:isThirdCousinOf
fhkb:hasRelation
fhkb:isPartnerIn
fhkb:isMalePartnerIn
fhkb:isFemalePartnerIn
fhkb:isBloodrelationOf
}
}
}
ORDER BY ?person"""
)
person_query_results = g.query(q_details)
for (subj, pred, obj) in person_query_results:
node = nodes.get(dump(subj), {
'data': {
'label': '',
'degree': 0,
'size': 10,
'alternateNames': [],
'honorificPrefixes': [],
'honorificSuffixes': [],
'images': [],
'id': dump(subj),
}})
if pred == FHKB.Sex:
node['data'][dump(pred)] = dump(obj)
elif pred.startswith(SCHEMA_ORG):
if dump(pred) == 'honorificSuffix':
node['data']['honorificSuffixes'].append(obj)
elif dump(pred) == 'honorificPrefix':
node['data']['honorificPrefixes'].append(obj)
elif dump(pred) == 'alternateName':
node['data']['alternateNames'].append(obj)
elif dump(pred) == 'image':
node['data']['images'].append(obj)
else:
node['data'][dump(pred)] = obj
elif pred == rdflib.RDFS.label:
node['data']['label'] = obj
else:
continue
nodes[dump(subj)] = node
graph['nodes'] = list(nodes.values())
print(json.dumps(graph, indent=0))
sys.exit(0)
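# Illustrative invocation (the input file name is an assumption; the optional second
# argument raises Python's recursion limit, as handled above):
#   python3 ttl2json.py family.ttl 10000 > graph.json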
| 28.339744
| 84
| 0.555078
| 425
| 4,421
| 5.687059
| 0.338824
| 0.026479
| 0.021514
| 0.021101
| 0.15391
| 0.098883
| 0.086885
| 0.086885
| 0.086885
| 0.086885
| 0
| 0.003938
| 0.310789
| 4,421
| 155
| 85
| 28.522581
| 0.789301
| 0.00475
| 0
| 0.090909
| 0
| 0
| 0.213577
| 0.011848
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011364
| false
| 0
| 0.045455
| 0
| 0.079545
| 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a3d017dc9b9c85df909d024333ec6af657c45e5
| 53,871
|
py
|
Python
|
tests/rest/client/test_login.py
|
BearerPipelineTest/synapse-1
|
78b99de7c206b106340e12cdee0af9aa246bd5ad
|
[
"Apache-2.0"
] | null | null | null |
tests/rest/client/test_login.py
|
BearerPipelineTest/synapse-1
|
78b99de7c206b106340e12cdee0af9aa246bd5ad
|
[
"Apache-2.0"
] | null | null | null |
tests/rest/client/test_login.py
|
BearerPipelineTest/synapse-1
|
78b99de7c206b106340e12cdee0af9aa246bd5ad
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import urllib.parse
from typing import Any, Dict, List, Optional, Union
from unittest.mock import Mock
from urllib.parse import urlencode
import pymacaroons
from twisted.test.proto_helpers import MemoryReactor
from twisted.web.resource import Resource
import synapse.rest.admin
from synapse.appservice import ApplicationService
from synapse.rest.client import devices, login, logout, register
from synapse.rest.client.account import WhoamiRestServlet
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.server import HomeServer
from synapse.types import create_requester
from synapse.util import Clock
from tests import unittest
from tests.handlers.test_oidc import HAS_OIDC
from tests.handlers.test_saml import has_saml2
from tests.rest.client.utils import TEST_OIDC_AUTH_ENDPOINT, TEST_OIDC_CONFIG
from tests.server import FakeChannel
from tests.test_utils.html_parsers import TestHtmlParser
from tests.unittest import HomeserverTestCase, override_config, skip_unless
try:
import jwt
HAS_JWT = True
except ImportError:
HAS_JWT = False
# synapse server name: used to populate public_baseurl in some tests
SYNAPSE_SERVER_PUBLIC_HOSTNAME = "synapse"
# public_baseurl for some tests. It uses an http:// scheme because
# FakeChannel.isSecure() returns False, so synapse will see the requested uri as
# http://..., so using http in the public_baseurl stops Synapse trying to redirect to
# https://....
BASE_URL = "http://%s/" % (SYNAPSE_SERVER_PUBLIC_HOSTNAME,)
# CAS server used in some tests
CAS_SERVER = "https://fake.test"
# just enough to tell pysaml2 where to redirect to
SAML_SERVER = "https://test.saml.server/idp/sso"
TEST_SAML_METADATA = """
<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata">
<md:IDPSSODescriptor protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
<md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="%(SAML_SERVER)s"/>
</md:IDPSSODescriptor>
</md:EntityDescriptor>
""" % {
"SAML_SERVER": SAML_SERVER,
}
LOGIN_URL = b"/_matrix/client/r0/login"
TEST_URL = b"/_matrix/client/r0/account/whoami"
# a (valid) url with some annoying characters in. %3D is =, %26 is &, %2B is +
TEST_CLIENT_REDIRECT_URL = 'https://x?<ab c>&q"+%3D%2B"="fö%26=o"'
# the query params in TEST_CLIENT_REDIRECT_URL
EXPECTED_CLIENT_REDIRECT_URL_PARAMS = [("<ab c>", ""), ('q" =+"', '"fö&=o"')]
# (possibly experimental) login flows we expect to appear in the list after the normal
# ones
ADDITIONAL_LOGIN_FLOWS = [
{"type": "m.login.application_service"},
{"type": "uk.half-shot.msc2778.login.application_service"},
]
class LoginRestServletTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
login.register_servlets,
logout.register_servlets,
devices.register_servlets,
lambda hs, http_server: WhoamiRestServlet(hs).register(http_server),
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.hs = self.setup_test_homeserver()
self.hs.config.registration.enable_registration = True
self.hs.config.registration.registrations_require_3pid = []
self.hs.config.registration.auto_join_rooms = []
self.hs.config.captcha.enable_registration_captcha = False
return self.hs
@override_config(
{
"rc_login": {
"address": {"per_second": 0.17, "burst_count": 5},
# Prevent the account login ratelimiter from raising first
#
# This is normally covered by the default test homeserver config
# which sets these values to 10000, but as we're overriding the entire
# rc_login dict here, we need to set this manually as well
"account": {"per_second": 10000, "burst_count": 10000},
}
}
)
def test_POST_ratelimiting_per_address(self) -> None:
# Create different users so we're sure not to be bothered by the per-user
# ratelimiter.
for i in range(0, 6):
self.register_user("kermit" + str(i), "monkey")
for i in range(0, 6):
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit" + str(i)},
"password": "monkey",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
if i == 5:
self.assertEqual(channel.result["code"], b"429", channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
self.assertEqual(channel.result["code"], b"200", channel.result)
# Since we're ratelimiting at 1 request/min, retry_after_ms should be lower
# than 1min.
self.assertTrue(retry_after_ms < 6000)
self.reactor.advance(retry_after_ms / 1000.0 + 1.0)
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit" + str(i)},
"password": "monkey",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"200", channel.result)
@override_config(
{
"rc_login": {
"account": {"per_second": 0.17, "burst_count": 5},
# Prevent the address login ratelimiter from raising first
#
# This is normally covered by the default test homeserver config
# which sets these values to 10000, but as we're overriding the entire
# rc_login dict here, we need to set this manually as well
"address": {"per_second": 10000, "burst_count": 10000},
}
}
)
def test_POST_ratelimiting_per_account(self) -> None:
self.register_user("kermit", "monkey")
for i in range(0, 6):
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "monkey",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
if i == 5:
self.assertEqual(channel.result["code"], b"429", channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
self.assertEqual(channel.result["code"], b"200", channel.result)
# Since we're ratelimiting at 1 request/min, retry_after_ms should be lower
# than 1min.
self.assertTrue(retry_after_ms < 6000)
self.reactor.advance(retry_after_ms / 1000.0)
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "monkey",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"200", channel.result)
@override_config(
{
"rc_login": {
# Prevent the address login ratelimiter from raising first
#
# This is normally covered by the default test homeserver config
# which sets these values to 10000, but as we're overriding the entire
# rc_login dict here, we need to set this manually as well
"address": {"per_second": 10000, "burst_count": 10000},
"failed_attempts": {"per_second": 0.17, "burst_count": 5},
}
}
)
def test_POST_ratelimiting_per_account_failed_attempts(self) -> None:
self.register_user("kermit", "monkey")
for i in range(0, 6):
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "notamonkey",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
if i == 5:
self.assertEqual(channel.result["code"], b"429", channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
self.assertEqual(channel.result["code"], b"403", channel.result)
# Since we're ratelimiting at 1 request/min, retry_after_ms should be lower
# than 1min.
self.assertTrue(retry_after_ms < 6000)
self.reactor.advance(retry_after_ms / 1000.0 + 1.0)
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "notamonkey",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"403", channel.result)
@override_config({"session_lifetime": "24h"})
def test_soft_logout(self) -> None:
self.register_user("kermit", "monkey")
# we shouldn't be able to make requests without an access token
channel = self.make_request(b"GET", TEST_URL)
self.assertEqual(channel.result["code"], b"401", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_MISSING_TOKEN")
# log in as normal
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
"password": "monkey",
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.code, 200, channel.result)
access_token = channel.json_body["access_token"]
device_id = channel.json_body["device_id"]
# we should now be able to make requests with the access token
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 200, channel.result)
# time passes
self.reactor.advance(24 * 3600)
# ... and we should be soft-logged out
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
#
# test behaviour after deleting the expired device
#
# we now log in as a different device
access_token_2 = self.login("kermit", "monkey")
# more requests with the expired token should still return a soft-logout
self.reactor.advance(3600)
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
# ... but if we delete that device, it will be a proper logout
self._delete_device(access_token_2, "kermit", "monkey", device_id)
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], False)
def _delete_device(
self, access_token: str, user_id: str, password: str, device_id: str
) -> None:
"""Perform the UI-Auth to delete a device"""
channel = self.make_request(
b"DELETE", "devices/" + device_id, access_token=access_token
)
self.assertEqual(channel.code, 401, channel.result)
# check it's a UI-Auth fail
self.assertEqual(
set(channel.json_body.keys()),
{"flows", "params", "session"},
channel.result,
)
auth = {
"type": "m.login.password",
# https://github.com/matrix-org/synapse/issues/5665
# "identifier": {"type": "m.id.user", "user": user_id},
"user": user_id,
"password": password,
"session": channel.json_body["session"],
}
channel = self.make_request(
b"DELETE",
"devices/" + device_id,
access_token=access_token,
content={"auth": auth},
)
self.assertEqual(channel.code, 200, channel.result)
@override_config({"session_lifetime": "24h"})
def test_session_can_hard_logout_after_being_soft_logged_out(self) -> None:
self.register_user("kermit", "monkey")
# log in as normal
access_token = self.login("kermit", "monkey")
# we should now be able to make requests with the access token
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 200, channel.result)
# time passes
self.reactor.advance(24 * 3600)
# ... and we should be soft-logged out
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
# Now try to hard logout this session
channel = self.make_request(b"POST", "/logout", access_token=access_token)
self.assertEqual(channel.result["code"], b"200", channel.result)
@override_config({"session_lifetime": "24h"})
def test_session_can_hard_logout_all_sessions_after_being_soft_logged_out(
self,
) -> None:
self.register_user("kermit", "monkey")
# log in as normal
access_token = self.login("kermit", "monkey")
# we should now be able to make requests with the access token
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 200, channel.result)
# time passes
self.reactor.advance(24 * 3600)
# ... and we should be soft-logged out
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
# Now try to hard log out all of the user's sessions
channel = self.make_request(b"POST", "/logout/all", access_token=access_token)
self.assertEqual(channel.result["code"], b"200", channel.result)
def test_login_with_overly_long_device_id_fails(self) -> None:
self.register_user("mickey", "cheese")
# create a device_id longer than 512 characters
device_id = "yolo" * 512
body = {
"type": "m.login.password",
"user": "mickey",
"password": "cheese",
"device_id": device_id,
}
# make a login request with the bad device_id
channel = self.make_request(
"POST",
"/_matrix/client/v3/login",
json.dumps(body).encode("utf8"),
custom_headers=None,
)
# test that the login fails with the correct error code
self.assertEqual(channel.code, 400)
self.assertEqual(channel.json_body["errcode"], "M_INVALID_PARAM")
@skip_unless(has_saml2 and HAS_OIDC, "Requires SAML2 and OIDC")
class MultiSSOTestCase(unittest.HomeserverTestCase):
"""Tests for homeservers with multiple SSO providers enabled"""
servlets = [
login.register_servlets,
]
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
config["public_baseurl"] = BASE_URL
config["cas_config"] = {
"enabled": True,
"server_url": CAS_SERVER,
"service_url": "https://matrix.goodserver.com:8448",
}
config["saml2_config"] = {
"sp_config": {
"metadata": {"inline": [TEST_SAML_METADATA]},
# use the XMLSecurity backend to avoid relying on xmlsec1
"crypto_backend": "XMLSecurity",
},
}
# default OIDC provider
config["oidc_config"] = TEST_OIDC_CONFIG
# additional OIDC providers
config["oidc_providers"] = [
{
"idp_id": "idp1",
"idp_name": "IDP1",
"discover": False,
"issuer": "https://issuer1",
"client_id": "test-client-id",
"client_secret": "test-client-secret",
"scopes": ["profile"],
"authorization_endpoint": "https://issuer1/auth",
"token_endpoint": "https://issuer1/token",
"userinfo_endpoint": "https://issuer1/userinfo",
"user_mapping_provider": {
"config": {"localpart_template": "{{ user.sub }}"}
},
}
]
return config
def create_resource_dict(self) -> Dict[str, Resource]:
d = super().create_resource_dict()
d.update(build_synapse_client_resource_tree(self.hs))
return d
def test_get_login_flows(self) -> None:
"""GET /login should return password and SSO flows"""
channel = self.make_request("GET", "/_matrix/client/r0/login")
self.assertEqual(channel.code, 200, channel.result)
expected_flow_types = [
"m.login.cas",
"m.login.sso",
"m.login.token",
"m.login.password",
] + [f["type"] for f in ADDITIONAL_LOGIN_FLOWS]
self.assertCountEqual(
[f["type"] for f in channel.json_body["flows"]], expected_flow_types
)
flows = {flow["type"]: flow for flow in channel.json_body["flows"]}
self.assertCountEqual(
flows["m.login.sso"]["identity_providers"],
[
{"id": "cas", "name": "CAS"},
{"id": "saml", "name": "SAML"},
{"id": "oidc-idp1", "name": "IDP1"},
{"id": "oidc", "name": "OIDC"},
],
)
def test_multi_sso_redirect(self) -> None:
"""/login/sso/redirect should redirect to an identity picker"""
# first hit the redirect url, which should redirect to our idp picker
channel = self._make_sso_redirect_request(None)
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
uri = location_headers[0]
# hitting that picker should give us some HTML
channel = self.make_request("GET", uri)
self.assertEqual(channel.code, 200, channel.result)
# parse the form to check it has fields assumed elsewhere in this class
html = channel.result["body"].decode("utf-8")
p = TestHtmlParser()
p.feed(html)
p.close()
# there should be a link for each href
returned_idps: List[str] = []
for link in p.links:
path, query = link.split("?", 1)
self.assertEqual(path, "pick_idp")
params = urllib.parse.parse_qs(query)
self.assertEqual(params["redirectUrl"], [TEST_CLIENT_REDIRECT_URL])
returned_idps.append(params["idp"][0])
self.assertCountEqual(returned_idps, ["cas", "oidc", "oidc-idp1", "saml"])
def test_multi_sso_redirect_to_cas(self) -> None:
"""If CAS is chosen, should redirect to the CAS server"""
channel = self.make_request(
"GET",
"/_synapse/client/pick_idp?redirectUrl="
+ urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
+ "&idp=cas",
shorthand=False,
)
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
cas_uri = location_headers[0]
cas_uri_path, cas_uri_query = cas_uri.split("?", 1)
# it should redirect us to the login page of the cas server
self.assertEqual(cas_uri_path, CAS_SERVER + "/login")
# check that the redirectUrl is correctly encoded in the service param - ie, the
# place that CAS will redirect to
cas_uri_params = urllib.parse.parse_qs(cas_uri_query)
service_uri = cas_uri_params["service"][0]
_, service_uri_query = service_uri.split("?", 1)
service_uri_params = urllib.parse.parse_qs(service_uri_query)
self.assertEqual(service_uri_params["redirectUrl"][0], TEST_CLIENT_REDIRECT_URL)
def test_multi_sso_redirect_to_saml(self) -> None:
"""If SAML is chosen, should redirect to the SAML server"""
channel = self.make_request(
"GET",
"/_synapse/client/pick_idp?redirectUrl="
+ urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
+ "&idp=saml",
)
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
saml_uri = location_headers[0]
saml_uri_path, saml_uri_query = saml_uri.split("?", 1)
# it should redirect us to the login page of the SAML server
self.assertEqual(saml_uri_path, SAML_SERVER)
# the RelayState is used to carry the client redirect url
saml_uri_params = urllib.parse.parse_qs(saml_uri_query)
relay_state_param = saml_uri_params["RelayState"][0]
self.assertEqual(relay_state_param, TEST_CLIENT_REDIRECT_URL)
def test_login_via_oidc(self) -> None:
"""If OIDC is chosen, should redirect to the OIDC auth endpoint"""
# pick the default OIDC provider
channel = self.make_request(
"GET",
"/_synapse/client/pick_idp?redirectUrl="
+ urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
+ "&idp=oidc",
)
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
oidc_uri = location_headers[0]
oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1)
# it should redirect us to the auth page of the OIDC server
self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT)
# ... and should have set a cookie including the redirect url
cookie_headers = channel.headers.getRawHeaders("Set-Cookie")
assert cookie_headers
cookies: Dict[str, str] = {}
for h in cookie_headers:
key, value = h.split(";")[0].split("=", maxsplit=1)
cookies[key] = value
oidc_session_cookie = cookies["oidc_session"]
macaroon = pymacaroons.Macaroon.deserialize(oidc_session_cookie)
self.assertEqual(
self._get_value_from_macaroon(macaroon, "client_redirect_url"),
TEST_CLIENT_REDIRECT_URL,
)
channel = self.helper.complete_oidc_auth(oidc_uri, cookies, {"sub": "user1"})
# that should serve a confirmation page
self.assertEqual(channel.code, 200, channel.result)
content_type_headers = channel.headers.getRawHeaders("Content-Type")
assert content_type_headers
self.assertTrue(content_type_headers[-1].startswith("text/html"))
p = TestHtmlParser()
p.feed(channel.text_body)
p.close()
# ... which should contain our redirect link
self.assertEqual(len(p.links), 1)
path, query = p.links[0].split("?", 1)
self.assertEqual(path, "https://x")
# it will have url-encoded the params properly, so we'll have to parse them
params = urllib.parse.parse_qsl(
query, keep_blank_values=True, strict_parsing=True, errors="strict"
)
self.assertEqual(params[0:2], EXPECTED_CLIENT_REDIRECT_URL_PARAMS)
self.assertEqual(params[2][0], "loginToken")
# finally, submit the matrix login token to the login API, which gives us our
# matrix access token, mxid, and device id.
login_token = params[2][1]
chan = self.make_request(
"POST",
"/login",
content={"type": "m.login.token", "token": login_token},
)
self.assertEqual(chan.code, 200, chan.result)
self.assertEqual(chan.json_body["user_id"], "@user1:test")
def test_multi_sso_redirect_to_unknown(self) -> None:
"""An unknown IdP should cause a 400"""
channel = self.make_request(
"GET",
"/_synapse/client/pick_idp?redirectUrl=http://x&idp=xyz",
)
self.assertEqual(channel.code, 400, channel.result)
def test_client_idp_redirect_to_unknown(self) -> None:
"""If the client tries to pick an unknown IdP, return a 404"""
channel = self._make_sso_redirect_request("xxx")
self.assertEqual(channel.code, 404, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND")
def test_client_idp_redirect_to_oidc(self) -> None:
"""If the client pick a known IdP, redirect to it"""
channel = self._make_sso_redirect_request("oidc")
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
oidc_uri = location_headers[0]
oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1)
# it should redirect us to the auth page of the OIDC server
self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT)
def _make_sso_redirect_request(self, idp_prov: Optional[str] = None) -> FakeChannel:
"""Send a request to /_matrix/client/r0/login/sso/redirect
... possibly specifying an IDP provider
"""
endpoint = "/_matrix/client/r0/login/sso/redirect"
if idp_prov is not None:
endpoint += "/" + idp_prov
endpoint += "?redirectUrl=" + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
return self.make_request(
"GET",
endpoint,
custom_headers=[("Host", SYNAPSE_SERVER_PUBLIC_HOSTNAME)],
)
@staticmethod
def _get_value_from_macaroon(macaroon: pymacaroons.Macaroon, key: str) -> str:
prefix = key + " = "
for caveat in macaroon.caveats:
if caveat.caveat_id.startswith(prefix):
return caveat.caveat_id[len(prefix) :]
raise ValueError("No %s caveat in macaroon" % (key,))
class CASTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.base_url = "https://matrix.goodserver.com/"
self.redirect_path = "_synapse/client/login/sso/redirect/confirm"
config = self.default_config()
config["public_baseurl"] = (
config.get("public_baseurl") or "https://matrix.goodserver.com:8448"
)
config["cas_config"] = {
"enabled": True,
"server_url": CAS_SERVER,
}
cas_user_id = "username"
self.user_id = "@%s:test" % cas_user_id
async def get_raw(uri: str, args: Any) -> bytes:
"""Return an example response payload from a call to the `/proxyValidate`
endpoint of a CAS server, copied from
https://apereo.github.io/cas/5.0.x/protocol/CAS-Protocol-V2-Specification.html#26-proxyvalidate-cas-20
This needs to be returned by an async function (as opposed to set as the
mock's return value) because the corresponding Synapse code awaits on it.
"""
return (
"""
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationSuccess>
<cas:user>%s</cas:user>
<cas:proxyGrantingTicket>PGTIOU-84678-8a9d...</cas:proxyGrantingTicket>
<cas:proxies>
<cas:proxy>https://proxy2/pgtUrl</cas:proxy>
<cas:proxy>https://proxy1/pgtUrl</cas:proxy>
</cas:proxies>
</cas:authenticationSuccess>
</cas:serviceResponse>
"""
% cas_user_id
).encode("utf-8")
mocked_http_client = Mock(spec=["get_raw"])
mocked_http_client.get_raw.side_effect = get_raw
self.hs = self.setup_test_homeserver(
config=config,
proxied_http_client=mocked_http_client,
)
return self.hs
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.deactivate_account_handler = hs.get_deactivate_account_handler()
def test_cas_redirect_confirm(self) -> None:
"""Tests that the SSO login flow serves a confirmation page before redirecting a
user to the redirect URL.
"""
base_url = "/_matrix/client/r0/login/cas/ticket?redirectUrl"
redirect_url = "https://dodgy-site.com/"
url_parts = list(urllib.parse.urlparse(base_url))
query = dict(urllib.parse.parse_qsl(url_parts[4]))
query.update({"redirectUrl": redirect_url})
query.update({"ticket": "ticket"})
url_parts[4] = urllib.parse.urlencode(query)
cas_ticket_url = urllib.parse.urlunparse(url_parts)
# Get Synapse to call the fake CAS and serve the template.
channel = self.make_request("GET", cas_ticket_url)
# Test that the response is HTML.
self.assertEqual(channel.code, 200, channel.result)
content_type_header_value = ""
for header in channel.result.get("headers", []):
if header[0] == b"Content-Type":
content_type_header_value = header[1].decode("utf8")
self.assertTrue(content_type_header_value.startswith("text/html"))
# Test that the body isn't empty.
self.assertTrue(len(channel.result["body"]) > 0)
# And that it contains our redirect link
self.assertIn(redirect_url, channel.result["body"].decode("UTF-8"))
@override_config(
{
"sso": {
"client_whitelist": [
"https://legit-site.com/",
"https://other-site.com/",
]
}
}
)
def test_cas_redirect_whitelisted(self) -> None:
"""Tests that the SSO login flow serves a redirect to a whitelisted url"""
self._test_redirect("https://legit-site.com/")
@override_config({"public_baseurl": "https://example.com"})
def test_cas_redirect_login_fallback(self) -> None:
self._test_redirect("https://example.com/_matrix/static/client/login")
def _test_redirect(self, redirect_url: str) -> None:
"""Tests that the SSO login flow serves a redirect for the given redirect URL."""
cas_ticket_url = (
"/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket"
% (urllib.parse.quote(redirect_url))
)
# Get Synapse to call the fake CAS and serve the template.
channel = self.make_request("GET", cas_ticket_url)
self.assertEqual(channel.code, 302)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
self.assertEqual(location_headers[0][: len(redirect_url)], redirect_url)
@override_config({"sso": {"client_whitelist": ["https://legit-site.com/"]}})
def test_deactivated_user(self) -> None:
"""Logging in as a deactivated account should error."""
redirect_url = "https://legit-site.com/"
# First login (to create the user).
self._test_redirect(redirect_url)
# Deactivate the account.
self.get_success(
self.deactivate_account_handler.deactivate_account(
self.user_id, False, create_requester(self.user_id)
)
)
# Request the CAS ticket.
cas_ticket_url = (
"/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket"
% (urllib.parse.quote(redirect_url))
)
# Get Synapse to call the fake CAS and serve the template.
channel = self.make_request("GET", cas_ticket_url)
# Because the user is deactivated they are served an error template.
self.assertEqual(channel.code, 403)
self.assertIn(b"SSO account deactivated", channel.result["body"])
@skip_unless(HAS_JWT, "requires jwt")
class JWTTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
login.register_servlets,
]
jwt_secret = "secret"
jwt_algorithm = "HS256"
base_config = {
"enabled": True,
"secret": jwt_secret,
"algorithm": jwt_algorithm,
}
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
# If jwt_config has been defined (eg via @override_config), don't replace it.
if config.get("jwt_config") is None:
config["jwt_config"] = self.base_config
return config
def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_secret) -> str:
# PyJWT 2.0.0 changed the return type of jwt.encode from bytes to str.
result: Union[str, bytes] = jwt.encode(payload, secret, self.jwt_algorithm)
if isinstance(result, bytes):
return result.decode("ascii")
return result
def jwt_login(self, *args: Any) -> FakeChannel:
params = {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)}
channel = self.make_request(b"POST", LOGIN_URL, params)
return channel
def test_login_jwt_valid_registered(self) -> None:
self.register_user("kermit", "monkey")
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
def test_login_jwt_valid_unregistered(self) -> None:
channel = self.jwt_login({"sub": "frog"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@frog:test")
def test_login_jwt_invalid_signature(self) -> None:
channel = self.jwt_login({"sub": "frog"}, "notsecret")
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
"JWT validation failed: Signature verification failed",
)
def test_login_jwt_expired(self) -> None:
channel = self.jwt_login({"sub": "frog", "exp": 864000})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"], "JWT validation failed: Signature has expired"
)
def test_login_jwt_not_before(self) -> None:
now = int(time.time())
channel = self.jwt_login({"sub": "frog", "nbf": now + 3600})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
"JWT validation failed: The token is not yet valid (nbf)",
)
def test_login_no_sub(self) -> None:
channel = self.jwt_login({"username": "root"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(channel.json_body["error"], "Invalid JWT")
@override_config({"jwt_config": {**base_config, "issuer": "test-issuer"}})
def test_login_iss(self) -> None:
"""Test validating the issuer claim."""
# A valid issuer.
channel = self.jwt_login({"sub": "kermit", "iss": "test-issuer"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
# An invalid issuer.
channel = self.jwt_login({"sub": "kermit", "iss": "invalid"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"], "JWT validation failed: Invalid issuer"
)
# Not providing an issuer.
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
'JWT validation failed: Token is missing the "iss" claim',
)
def test_login_iss_no_config(self) -> None:
"""Test providing an issuer claim without requiring it in the configuration."""
channel = self.jwt_login({"sub": "kermit", "iss": "invalid"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
@override_config({"jwt_config": {**base_config, "audiences": ["test-audience"]}})
def test_login_aud(self) -> None:
"""Test validating the audience claim."""
# A valid audience.
channel = self.jwt_login({"sub": "kermit", "aud": "test-audience"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
# An invalid audience.
channel = self.jwt_login({"sub": "kermit", "aud": "invalid"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"], "JWT validation failed: Invalid audience"
)
# Not providing an audience.
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
'JWT validation failed: Token is missing the "aud" claim',
)
def test_login_aud_no_config(self) -> None:
"""Test providing an audience without requiring it in the configuration."""
channel = self.jwt_login({"sub": "kermit", "aud": "invalid"})
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"], "JWT validation failed: Invalid audience"
)
def test_login_default_sub(self) -> None:
"""Test reading user ID from the default subject claim."""
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
@override_config({"jwt_config": {**base_config, "subject_claim": "username"}})
def test_login_custom_sub(self) -> None:
"""Test reading user ID from a custom subject claim."""
channel = self.jwt_login({"username": "frog"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@frog:test")
def test_login_no_token(self) -> None:
params = {"type": "org.matrix.login.jwt"}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(channel.json_body["error"], "Token field for JWT is missing")
# The JWTPubKeyTestCase is a complement to JWTTestCase where we instead use
# RS256, with a public key configured in synapse as "jwt_secret", and tokens
# signed by the private key.
@skip_unless(HAS_JWT, "requires jwt")
class JWTPubKeyTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
]
# This key's pubkey is used as the jwt_secret setting of synapse. Valid
# tokens are signed by this and validated using the pubkey. It is generated
# with `openssl genrsa 512` (not a secure way to generate real keys, but
# good enough for tests!)
jwt_privatekey = "\n".join(
[
"-----BEGIN RSA PRIVATE KEY-----",
"MIIBPAIBAAJBAM50f1Q5gsdmzifLstzLHb5NhfajiOt7TKO1vSEWdq7u9x8SMFiB",
"492RM9W/XFoh8WUfL9uL6Now6tPRDsWv3xsCAwEAAQJAUv7OOSOtiU+wzJq82rnk",
"yR4NHqt7XX8BvkZPM7/+EjBRanmZNSp5kYZzKVaZ/gTOM9+9MwlmhidrUOweKfB/",
"kQIhAPZwHazbjo7dYlJs7wPQz1vd+aHSEH+3uQKIysebkmm3AiEA1nc6mDdmgiUq",
"TpIN8A4MBKmfZMWTLq6z05y/qjKyxb0CIQDYJxCwTEenIaEa4PdoJl+qmXFasVDN",
"ZU0+XtNV7yul0wIhAMI9IhiStIjS2EppBa6RSlk+t1oxh2gUWlIh+YVQfZGRAiEA",
"tqBR7qLZGJ5CVKxWmNhJZGt1QHoUtOch8t9C4IdOZ2g=",
"-----END RSA PRIVATE KEY-----",
]
)
# Generated with `openssl rsa -in foo.key -pubout`, with the above
# private key placed in foo.key (jwt_privatekey).
jwt_pubkey = "\n".join(
[
"-----BEGIN PUBLIC KEY-----",
"MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAM50f1Q5gsdmzifLstzLHb5NhfajiOt7",
"TKO1vSEWdq7u9x8SMFiB492RM9W/XFoh8WUfL9uL6Now6tPRDsWv3xsCAwEAAQ==",
"-----END PUBLIC KEY-----",
]
)
# This key is used to sign tokens that shouldn't be accepted by synapse.
# Generated just like jwt_privatekey.
bad_privatekey = "\n".join(
[
"-----BEGIN RSA PRIVATE KEY-----",
"MIIBOgIBAAJBAL//SQrKpKbjCCnv/FlasJCv+t3k/MPsZfniJe4DVFhsktF2lwQv",
"gLjmQD3jBUTz+/FndLSBvr3F4OHtGL9O/osCAwEAAQJAJqH0jZJW7Smzo9ShP02L",
"R6HRZcLExZuUrWI+5ZSP7TaZ1uwJzGFspDrunqaVoPobndw/8VsP8HFyKtceC7vY",
"uQIhAPdYInDDSJ8rFKGiy3Ajv5KWISBicjevWHF9dbotmNO9AiEAxrdRJVU+EI9I",
"eB4qRZpY6n4pnwyP0p8f/A3NBaQPG+cCIFlj08aW/PbxNdqYoBdeBA0xDrXKfmbb",
"iwYxBkwL0JCtAiBYmsi94sJn09u2Y4zpuCbJeDPKzWkbuwQh+W1fhIWQJQIhAKR0",
"KydN6cRLvphNQ9c/vBTdlzWxzcSxREpguC7F1J1m",
"-----END RSA PRIVATE KEY-----",
]
)
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
config["jwt_config"] = {
"enabled": True,
"secret": self.jwt_pubkey,
"algorithm": "RS256",
}
return config
def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_privatekey) -> str:
# PyJWT 2.0.0 changed the return type of jwt.encode from bytes to str.
result: Union[bytes, str] = jwt.encode(payload, secret, "RS256")
if isinstance(result, bytes):
return result.decode("ascii")
return result
def jwt_login(self, *args: Any) -> FakeChannel:
params = {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)}
channel = self.make_request(b"POST", LOGIN_URL, params)
return channel
def test_login_jwt_valid(self) -> None:
channel = self.jwt_login({"sub": "kermit"})
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
def test_login_jwt_invalid_signature(self) -> None:
channel = self.jwt_login({"sub": "frog"}, self.bad_privatekey)
self.assertEqual(channel.result["code"], b"403", channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
"JWT validation failed: Signature verification failed",
)
AS_USER = "as_user_alice"
class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
register.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.hs = self.setup_test_homeserver()
self.service = ApplicationService(
id="unique_identifier",
token="some_token",
hostname="example.com",
sender="@asbot:example.com",
namespaces={
ApplicationService.NS_USERS: [
{"regex": r"@as_user.*", "exclusive": False}
],
ApplicationService.NS_ROOMS: [],
ApplicationService.NS_ALIASES: [],
},
)
self.another_service = ApplicationService(
id="another__identifier",
token="another_token",
hostname="example.com",
sender="@as2bot:example.com",
namespaces={
ApplicationService.NS_USERS: [
{"regex": r"@as2_user.*", "exclusive": False}
],
ApplicationService.NS_ROOMS: [],
ApplicationService.NS_ALIASES: [],
},
)
self.hs.get_datastores().main.services_cache.append(self.service)
self.hs.get_datastores().main.services_cache.append(self.another_service)
return self.hs
def test_login_appservice_user(self) -> None:
"""Test that an appservice user can use /login"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": AS_USER},
}
channel = self.make_request(
b"POST", LOGIN_URL, params, access_token=self.service.token
)
self.assertEqual(channel.result["code"], b"200", channel.result)
def test_login_appservice_user_bot(self) -> None:
"""Test that the appservice bot can use /login"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": self.service.sender},
}
channel = self.make_request(
b"POST", LOGIN_URL, params, access_token=self.service.token
)
self.assertEqual(channel.result["code"], b"200", channel.result)
def test_login_appservice_wrong_user(self) -> None:
"""Test that non-as users cannot login with the as token"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": "fibble_wibble"},
}
channel = self.make_request(
b"POST", LOGIN_URL, params, access_token=self.service.token
)
self.assertEqual(channel.result["code"], b"403", channel.result)
def test_login_appservice_wrong_as(self) -> None:
"""Test that as users cannot login with wrong as token"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": AS_USER},
}
channel = self.make_request(
b"POST", LOGIN_URL, params, access_token=self.another_service.token
)
self.assertEqual(channel.result["code"], b"403", channel.result)
def test_login_appservice_no_token(self) -> None:
"""Test that users must provide a token when using the appservice
login method
"""
self.register_appservice_user(AS_USER, self.service.token)
params = {
"type": login.LoginRestServlet.APPSERVICE_TYPE,
"identifier": {"type": "m.id.user", "user": AS_USER},
}
channel = self.make_request(b"POST", LOGIN_URL, params)
self.assertEqual(channel.result["code"], b"401", channel.result)
@skip_unless(HAS_OIDC, "requires OIDC")
class UsernamePickerTestCase(HomeserverTestCase):
"""Tests for the username picker flow of SSO login"""
servlets = [login.register_servlets]
def default_config(self) -> Dict[str, Any]:
config = super().default_config()
config["public_baseurl"] = BASE_URL
config["oidc_config"] = {}
config["oidc_config"].update(TEST_OIDC_CONFIG)
config["oidc_config"]["user_mapping_provider"] = {
"config": {"display_name_template": "{{ user.displayname }}"}
}
# whitelist this client URI so we redirect straight to it rather than
# serving a confirmation page
config["sso"] = {"client_whitelist": ["https://x"]}
return config
def create_resource_dict(self) -> Dict[str, Resource]:
d = super().create_resource_dict()
d.update(build_synapse_client_resource_tree(self.hs))
return d
def test_username_picker(self) -> None:
"""Test the happy path of a username picker flow."""
# do the start of the login flow
channel = self.helper.auth_via_oidc(
{"sub": "tester", "displayname": "Jonny"}, TEST_CLIENT_REDIRECT_URL
)
# that should redirect to the username picker
self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
picker_url = location_headers[0]
self.assertEqual(picker_url, "/_synapse/client/pick_username/account_details")
# ... with a username_mapping_session cookie
cookies: Dict[str, str] = {}
channel.extract_cookies(cookies)
self.assertIn("username_mapping_session", cookies)
session_id = cookies["username_mapping_session"]
# introspect the sso handler a bit to check that the username mapping session
# looks ok.
username_mapping_sessions = self.hs.get_sso_handler()._username_mapping_sessions
self.assertIn(
session_id,
username_mapping_sessions,
"session id not found in map",
)
session = username_mapping_sessions[session_id]
self.assertEqual(session.remote_user_id, "tester")
self.assertEqual(session.display_name, "Jonny")
self.assertEqual(session.client_redirect_url, TEST_CLIENT_REDIRECT_URL)
# the expiry time should be about 15 minutes away
expected_expiry = self.clock.time_msec() + (15 * 60 * 1000)
self.assertApproximates(session.expiry_time_ms, expected_expiry, tolerance=1000)
# Now, submit a username to the username picker, which should serve a redirect
# to the completion page
content = urlencode({b"username": b"bobby"}).encode("utf8")
chan = self.make_request(
"POST",
path=picker_url,
content=content,
content_is_form=True,
custom_headers=[
("Cookie", "username_mapping_session=" + session_id),
# old versions of twisted don't do form-parsing without a valid
# content-length header.
("Content-Length", str(len(content))),
],
)
self.assertEqual(chan.code, 302, chan.result)
location_headers = chan.headers.getRawHeaders("Location")
assert location_headers
# send a request to the completion page, which should 302 to the client redirectUrl
chan = self.make_request(
"GET",
path=location_headers[0],
custom_headers=[("Cookie", "username_mapping_session=" + session_id)],
)
self.assertEqual(chan.code, 302, chan.result)
location_headers = chan.headers.getRawHeaders("Location")
assert location_headers
# ensure that the returned location matches the requested redirect URL
path, query = location_headers[0].split("?", 1)
self.assertEqual(path, "https://x")
# it will have url-encoded the params properly, so we'll have to parse them
params = urllib.parse.parse_qsl(
query, keep_blank_values=True, strict_parsing=True, errors="strict"
)
self.assertEqual(params[0:2], EXPECTED_CLIENT_REDIRECT_URL_PARAMS)
self.assertEqual(params[2][0], "loginToken")
# fish the login token out of the returned redirect uri
login_token = params[2][1]
# finally, submit the matrix login token to the login API, which gives us our
# matrix access token, mxid, and device id.
chan = self.make_request(
"POST",
"/login",
content={"type": "m.login.token", "token": login_token},
)
self.assertEqual(chan.code, 200, chan.result)
self.assertEqual(chan.json_body["user_id"], "@bobby:test")
| 40.443694
| 119
| 0.626218
| 6,317
| 53,871
| 5.168276
| 0.120785
| 0.061106
| 0.070755
| 0.034244
| 0.570724
| 0.536694
| 0.503706
| 0.48251
| 0.469278
| 0.446949
| 0
| 0.016304
| 0.255369
| 53,871
| 1,331
| 120
| 40.47408
| 0.797577
| 0.153608
| 0
| 0.443593
| 0
| 0.003286
| 0.174903
| 0.046257
| 0
| 0
| 0
| 0
| 0.173056
| 1
| 0.063527
| false
| 0.021906
| 0.028478
| 0
| 0.133625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a3d26451658f18eb6e4d945d41095c7fba3dc44
| 1,683
|
py
|
Python
|
rmf_demo_tasks/rmf_demo_tasks/request_delivery.py
|
Kevinskwk/rmf_demos
|
2d7b9c7c75211b89b91977e5d1a66f440cc5df95
|
[
"Apache-2.0"
] | null | null | null |
rmf_demo_tasks/rmf_demo_tasks/request_delivery.py
|
Kevinskwk/rmf_demos
|
2d7b9c7c75211b89b91977e5d1a66f440cc5df95
|
[
"Apache-2.0"
] | null | null | null |
rmf_demo_tasks/rmf_demo_tasks/request_delivery.py
|
Kevinskwk/rmf_demos
|
2d7b9c7c75211b89b91977e5d1a66f440cc5df95
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import sys
from time import sleep
import uuid

import rclpy

from rmf_task_msgs.msg import Delivery


def main(argv=sys.argv):
    rclpy.init(args=argv)
    args_without_ros = rclpy.utilities.remove_ros_args(argv)

    '''
    # Example request:
    task_id: randomid_001
    items: [itemA, itemB....]
    pickup_place_name: cssd_room
    pickup_behavior:
        - name: dispenser
        - parameters: [request_guid: xxx, target_guid:cssdbot, transporter_type:mir]
    dropoff_place_name: ot_prep_room
    dropoff_behavior:
        - name: dispenser
        - parameters: [request_guid: yyy, target_guid:otbot, transporter_type:mir]
    '''

    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--pickup', default='pantry', help='Start waypoint')
    parser.add_argument('-d', '--dropoff', default='hardware_2', help='Finish waypoint')
    parser.add_argument('-i', '--task-id', help='Task ID', default='', type=str)
    parser.add_argument('-r', '--robot-type', help='Type of robot', default='magni')
    args = parser.parse_args(args_without_ros[1:])

    node = rclpy.create_node('loop_request_publisher')
    publisher = node.create_publisher(Delivery, 'delivery_requests', 10)

    # give the publisher a moment to match with subscribers before sending
    sleep(0.5)

    request = Delivery()
    if args.task_id:
        request.task_id = args.task_id
    else:
        # fall back to a unique, recognisable task id
        request.task_id = 'delivery#' + str(uuid.uuid1())
    request.pickup_place_name = args.pickup
    request.dropoff_place_name = args.dropoff

    # publish the same request a few times so it is not missed
    for _ in range(5):
        publisher.publish(request)
        sleep(0.5)

    rclpy.shutdown()

    print(f'Delivery request submitted to {args.robot_type}')


if __name__ == '__main__':
    main(sys.argv)
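# Illustrative addition (not part of the original script): a minimal companion
# listener that shows how the messages published above can be observed. It
# reuses the rclpy and Delivery imports from the top of this file; the node
# name is a hypothetical choice.
def listen(argv=None):
    rclpy.init(args=argv)
    node = rclpy.create_node('delivery_request_listener')
    # print the task id of every request seen on the 'delivery_requests' topic
    node.create_subscription(
        Delivery,
        'delivery_requests',
        lambda msg: print(f'received delivery request: {msg.task_id}'),
        10,
    )
    try:
        rclpy.spin(node)
    finally:
        node.destroy_node()
        rclpy.shutdown()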
| 27.590164
| 88
| 0.680333
| 218
| 1,683
| 5.004587
| 0.422018
| 0.038497
| 0.062328
| 0.056829
| 0.076994
| 0.076994
| 0
| 0
| 0
| 0
| 0
| 0.009531
| 0.189542
| 1,683
| 60
| 89
| 28.05
| 0.790323
| 0
| 0
| 0.0625
| 0
| 0
| 0.171765
| 0.017255
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.1875
| 0
| 0.21875
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a3d8aa1a0610f6e6749b406310d289569ef5143
| 13,701
|
py
|
Python
|
dis_snek/api/http/http_client.py
|
BoredManCodes/Dis-Snek
|
662dbc3f86c133fd704c22d3d6d55af5ee1f6f5b
|
[
"MIT"
] | null | null | null |
dis_snek/api/http/http_client.py
|
BoredManCodes/Dis-Snek
|
662dbc3f86c133fd704c22d3d6d55af5ee1f6f5b
|
[
"MIT"
] | null | null | null |
dis_snek/api/http/http_client.py
|
BoredManCodes/Dis-Snek
|
662dbc3f86c133fd704c22d3d6d55af5ee1f6f5b
|
[
"MIT"
] | null | null | null |
"""This file handles the interaction with discords http endpoints."""
import asyncio
import logging
from typing import Any, Dict, Optional, Union
from urllib.parse import quote as _uriquote
from weakref import WeakValueDictionary
import aiohttp
from aiohttp import BaseConnector, ClientSession, ClientWebSocketResponse, FormData
from multidict import CIMultiDictProxy
from dis_snek.api.http.http_requests import (
BotRequests,
ChannelRequests,
EmojiRequests,
GuildRequests,
InteractionRequests,
MemberRequests,
MessageRequests,
ReactionRequests,
StickerRequests,
ThreadRequests,
UserRequests,
WebhookRequests,
ScheduledEventsRequests,
)
from dis_snek.client.const import __py_version__, __repo_url__, __version__, logger_name, MISSING, Absent
from dis_snek.client.errors import DiscordError, Forbidden, GatewayNotFound, HTTPException, NotFound, LoginError
from dis_snek.client.utils.input_utils import response_decode
from dis_snek.client.utils.serializer import dict_filter_missing
from dis_snek.models import CooldownSystem
from .route import Route
__all__ = ["HTTPClient"]
log = logging.getLogger(logger_name)
class GlobalLock:
"""Manages the global ratelimit"""
def __init__(self) -> None:
self.cooldown_system: CooldownSystem = CooldownSystem(
45, 1
) # global rate-limit is 50 per second, conservatively we use 45
self._lock: asyncio.Lock = asyncio.Lock()
async def rate_limit(self) -> None:
async with self._lock:
while not self.cooldown_system.acquire_token():
await asyncio.sleep(self.cooldown_system.get_cooldown_time())
async def lock(self, delta: float) -> None:
"""
Lock the global lock for a given duration.
Args:
delta: The time to keep the lock acquired
"""
await self._lock.acquire()
await asyncio.sleep(delta)
self._lock.release()
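# Usage sketch (illustrative only, not part of the original module): callers
# await GlobalLock.rate_limit() before every HTTP call so that bursts are
# smoothed to stay under the conservative 45-requests-per-second budget noted
# above. The function below is a hypothetical example, not used by the client.
async def _global_lock_usage_sketch() -> None:
    sketch_lock = GlobalLock()
    for _ in range(100):
        # sleeps whenever the cooldown system has no tokens left
        await sketch_lock.rate_limit()
        # ... one HTTP request would be made here ...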
class BucketLock:
"""Manages the ratelimit for each bucket"""
def __init__(self) -> None:
self._lock: asyncio.Lock = asyncio.Lock()
self.unlock_on_exit: bool = True
self.bucket_hash: Optional[str] = None
self.limit: int = -1
self.remaining: int = -1
self.delta: float = 0.0
def __repr__(self) -> str:
return f"<BucketLock: {self.bucket_hash or 'Generic'}>"
@property
def locked(self) -> bool:
"""Return True if lock is acquired."""
return self._lock.locked()
def unlock(self) -> None:
"""Unlock this bucket."""
self._lock.release()
def ingest_ratelimit_header(self, header: CIMultiDictProxy) -> None:
"""
Ingests a discord rate limit header to configure this bucket lock.
Args:
header: A header from a http response
"""
self.bucket_hash = header.get("x-ratelimit-bucket")
self.limit = int(header.get("x-ratelimit-limit") or -1)
self.remaining = int(header.get("x-ratelimit-remaining") or -1)
self.delta = float(header.get("x-ratelimit-reset-after", 0.0))
async def blind_defer_unlock(self) -> None:
"""Unlocks the BucketLock but doesn't wait for completion."""
self.unlock_on_exit = False
loop = asyncio.get_running_loop()
loop.call_later(self.delta, self.unlock)
async def defer_unlock(self) -> None:
"""Unlocks the BucketLock after a specified delay."""
self.unlock_on_exit = False
await asyncio.sleep(self.delta)
self.unlock()
async def __aenter__(self) -> None:
await self._lock.acquire()
async def __aexit__(self, *args) -> None:
if self.unlock_on_exit and self._lock.locked():
self.unlock()
self.unlock_on_exit = True
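# Usage sketch (illustrative only, not part of the original module): a bucket
# lock wraps a single request; the response's rate limit headers configure the
# lock, and when the bucket is exhausted the unlock is deferred until the
# reset window has passed. The function below is a hypothetical example that
# mirrors the flow implemented in HTTPClient.request further down.
async def _bucket_lock_usage_sketch(headers: CIMultiDictProxy) -> None:
    sketch_lock = BucketLock()
    async with sketch_lock:
        # ... the request would be made here; its response headers are then
        # fed back into the lock ...
        sketch_lock.ingest_ratelimit_header(headers)
        if sketch_lock.remaining == 0:
            # keep the bucket locked (without blocking) until the reset elapses
            await sketch_lock.blind_defer_unlock()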
class HTTPClient(
BotRequests,
ChannelRequests,
EmojiRequests,
GuildRequests,
InteractionRequests,
MemberRequests,
MessageRequests,
ReactionRequests,
StickerRequests,
ThreadRequests,
UserRequests,
WebhookRequests,
ScheduledEventsRequests,
):
"""A http client for sending requests to the Discord API."""
def __init__(self, connector: Optional[BaseConnector] = None, loop: Optional[asyncio.AbstractEventLoop] = None):
self.connector: Optional[BaseConnector] = connector
self.loop = asyncio.get_event_loop() if loop is None else loop
self.__session: Absent[Optional[ClientSession]] = MISSING
self.token: Optional[str] = None
self.global_lock: GlobalLock = GlobalLock()
self._max_attempts: int = 3
self.ratelimit_locks: WeakValueDictionary[str, BucketLock] = WeakValueDictionary()
self._endpoints = {}
self.user_agent: str = (
f"DiscordBot ({__repo_url__} {__version__} Python/{__py_version__}) aiohttp/{aiohttp.__version__}"
)
def __del__(self):
if self.__session and not self.__session.closed:
self.loop.run_until_complete(self.__session.close())
def get_ratelimit(self, route: Route) -> BucketLock:
"""
Get a route's rate limit bucket.
Args:
route: The route to fetch the ratelimit bucket for
Returns:
The BucketLock object for this route
"""
if bucket_hash := self._endpoints.get(route.rl_bucket):
# we have seen this route before, we know which bucket it is associated with
lock = self.ratelimit_locks.get(bucket_hash)
if lock:
# if we have an active lock on this route, it'll still be in the cache
# return that lock
return lock
# if no cached lock exists, return a new lock
return BucketLock()
def ingest_ratelimit(self, route: Route, header: CIMultiDictProxy, bucket_lock: BucketLock) -> None:
"""
Ingests a ratelimit header from discord to determine ratelimit.
Args:
route: The route we're ingesting ratelimit for
header: The rate limit header in question
bucket_lock: The rate limit bucket for this route
"""
bucket_lock.ingest_ratelimit_header(header)
if bucket_lock.bucket_hash:
# We only ever try and cache the bucket if the bucket hash has been set (ignores unlimited endpoints)
log.debug(f"Caching ingested rate limit data for: {bucket_lock.bucket_hash}")
self._endpoints[route.rl_bucket] = bucket_lock.bucket_hash
self.ratelimit_locks[bucket_lock.bucket_hash] = bucket_lock
async def request(
self,
route: Route,
data: Absent[Union[dict, FormData]] = MISSING,
reason: Absent[str] = MISSING,
**kwargs: Dict[str, Any],
) -> Any:
"""
Make a request to discord.
parameters:
route: The route to take
json: A json payload to send in the request
reason: Attach a reason to this request, used for audit logs
"""
# Assemble headers
kwargs["headers"] = {"User-Agent": self.user_agent}
if self.token:
kwargs["headers"]["Authorization"] = f"Bot {self.token}"
if reason not in (None, MISSING):
kwargs["headers"]["X-Audit-Log-Reason"] = _uriquote(reason, safe="/ ")
if isinstance(data, (list, dict)):
kwargs["headers"]["Content-Type"] = "application/json"
# sanity check payload
if isinstance(data, list):
kwargs["json"] = [dict_filter_missing(x) if isinstance(x, dict) else x for x in data]
elif isinstance(data, dict):
kwargs["json"] = dict_filter_missing(data)
elif isinstance(data, FormData):
kwargs["data"] = data
lock = self.get_ratelimit(route)
# this gets a BucketLock for this route.
# If this endpoint has been used before, it will get an existing ratelimit for the respective buckethash
# otherwise a brand-new bucket lock will be returned
for attempt in range(self._max_attempts):
async with lock:
try:
await self.global_lock.rate_limit()
# prevent us exceeding the global rate limit by throttling http requests
if self.__session.closed:
await self.login(self.token)
async with self.__session.request(route.method, route.url, **kwargs) as response:
result = await response_decode(response)
self.ingest_ratelimit(route, response.headers, lock)
if response.status == 429:
# ratelimit exceeded
if result.get("global", False):
# if we get a global, that's pretty bad, this would usually happen if the user is hitting the api from 2 clients sharing a token
log.error(
f"Bot has exceeded global ratelimit, locking REST API for {result.get('retry_after')} seconds"
)
await self.global_lock.lock(float(result.get("retry_after")))
continue
else:
# 429's are unfortunately unavoidable, but we can attempt to avoid them
# so long as these are infrequent we're doing well
log.warning(
f"{route.endpoint} Has exceeded it's ratelimit ({lock.limit})! Reset in {lock.delta} seconds"
)
await lock.defer_unlock() # lock this route and wait for unlock
continue
elif lock.remaining == 0:
# Last call available in the bucket, lock until reset
log.debug(
f"{route.endpoint} Has exhausted its ratelimit ({lock.limit})! Locking route for {lock.delta} seconds"
)
await lock.blind_defer_unlock() # lock this route, but continue processing the current response
elif response.status in {500, 502, 504}:
# Server issues, retry
log.warning(
f"{route.endpoint} Received {response.status}... retrying in {1 + attempt * 2} seconds"
)
await asyncio.sleep(1 + attempt * 2)
continue
if not 300 > response.status >= 200:
await self._raise_exception(response, route, result)
log.debug(
f"{route.endpoint} Received {response.status} :: [{lock.remaining}/{lock.limit} calls remaining]"
)
return result
except OSError as e:
if attempt < self._max_attempts - 1 and e.errno in (54, 10054):
await asyncio.sleep(1 + attempt * 2)
continue
raise
async def _raise_exception(self, response, route, result):
log.error(f"{route.method}::{route.url}: {response.status}")
if response.status == 403:
raise Forbidden(response, response_data=result, route=route)
elif response.status == 404:
raise NotFound(response, response_data=result, route=route)
elif response.status >= 500:
raise DiscordError(response, response_data=result, route=route)
else:
raise HTTPException(response, response_data=result, route=route)
async def request_cdn(self, url, asset) -> bytes:
log.debug(f"{asset} requests {url} from CDN")
async with self.__session.get(url) as response:
if response.status == 200:
return await response.read()
await self._raise_exception(response, asset, await response_decode(response))
async def login(self, token: str) -> dict:
"""
"Login" to the gateway, basically validates the token and grabs user data.
parameters:
token: the token to use
returns:
The currently logged in bot's data
"""
self.__session = ClientSession(connector=self.connector)
self.token = token
try:
return await self.request(Route("GET", "/users/@me"))
except HTTPException as e:
if e.status == 401:
raise LoginError("An improper token was passed") from e
raise
async def close(self) -> None:
"""Close the session."""
if self.__session:
await self.__session.close()
async def get_gateway(self) -> str:
"""Get the gateway url."""
try:
data: dict = await self.request(Route("GET", "/gateway"))
except HTTPException as exc:
raise GatewayNotFound from exc
return "{0}?encoding={1}&v=9&compress=zlib-stream".format(data["url"], "json")
async def websocket_connect(self, url: str) -> ClientWebSocketResponse:
"""
Connect to the websocket.
parameters:
url: the url to connect to
"""
return await self.__session.ws_connect(
url, timeout=30, max_msg_size=0, autoclose=False, headers={"User-Agent": self.user_agent}, compress=0
)
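# End-to-end usage sketch (illustrative only, not part of the original module).
# The token string is a placeholder; login() validates it by fetching the
# bot's own user object, and get_gateway() returns the websocket URL.
async def _http_client_usage_sketch() -> None:
    http = HTTPClient()
    me = await http.login("BOT_TOKEN_PLACEHOLDER")
    log.info(f"logged in as {me.get('username')}")
    gateway_url = await http.get_gateway()
    log.info(f"gateway url: {gateway_url}")
    await http.close()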
| 38.485955
| 160
| 0.593387
| 1,529
| 13,701
| 5.179856
| 0.23741
| 0.013131
| 0.008333
| 0.010101
| 0.189141
| 0.121212
| 0.081313
| 0.062879
| 0.062879
| 0.049242
| 0
| 0.008048
| 0.319831
| 13,701
| 355
| 161
| 38.594366
| 0.841829
| 0.133348
| 0
| 0.245536
| 0
| 0.017857
| 0.100019
| 0.022946
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044643
| false
| 0.004464
| 0.066964
| 0.004464
| 0.165179
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0a3e6de6fa0adef7035c5c9d0aedbcc9e7f13b79
| 791
|
py
|
Python
|
electrum/version.py
|
c4pt000/electrum-radiocoin
|
7cb5f618a9aa8cd03d60191624a0e57cc24646d2
|
[
"MIT"
] | null | null | null |
electrum/version.py
|
c4pt000/electrum-radiocoin
|
7cb5f618a9aa8cd03d60191624a0e57cc24646d2
|
[
"MIT"
] | null | null | null |
electrum/version.py
|
c4pt000/electrum-radiocoin
|
7cb5f618a9aa8cd03d60191624a0e57cc24646d2
|
[
"MIT"
] | null | null | null |
ELECTRUM_VERSION = '4.1.5-radc'   # version of the client package
APK_VERSION = '4.1.5.0'           # read by buildozer.spec
PROTOCOL_VERSION = '1.4'          # protocol version requested

# The hash of the mnemonic seed must begin with this
SEED_PREFIX        = '01'   # Standard wallet
SEED_PREFIX_SW     = '100'  # Segwit wallet
SEED_PREFIX_2FA    = '101'  # Two-factor authentication
SEED_PREFIX_2FA_SW = '102'  # Two-factor auth, using segwit


def seed_prefix(seed_type):
    if seed_type == 'standard':
        return SEED_PREFIX
    elif seed_type == 'segwit':
        return SEED_PREFIX_SW
    elif seed_type == '2fa':
        return SEED_PREFIX_2FA
    elif seed_type == '2fa_segwit':
        return SEED_PREFIX_2FA_SW
    raise Exception(f"unknown seed_type: {seed_type}")
| 34.391304
| 67
| 0.668774
| 112
| 791
| 4.473214
| 0.4375
| 0.179641
| 0.103792
| 0.03992
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043406
| 0.242731
| 791
| 22
| 68
| 35.954545
| 0.792988
| 0.273072
| 0
| 0
| 0
| 0
| 0.155477
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|